/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.156 2003/06/19 12:47:08 simonmar Exp $
 *
 * (c) The GHC Team 1998-2003
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"

#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

#include "RetainerProfile.h"
#include "LdvProfile.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
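
/* For illustration: consing an object onto the front of the list is a
 * two-pointer update, exactly as done in evacuate() below for the
 * THUNK_STATIC/FUN_STATIC/CONSTR_STATIC cases:
 *
 *     if (STATIC_LINK(info,(StgClosure *)q) == NULL) { // not on a list yet
 *         STATIC_LINK(info,(StgClosure *)q) = static_objects;
 *         static_objects = (StgClosure *)q;
 *     }
 */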
static StgClosure* static_objects;	// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

StgWeak *old_weak_ptr_list;	// also pending finaliser list
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
static lnat new_blocks;		 // blocks allocated during this GC
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 8
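
/* For intuition (an illustrative sketch, not code from this module):
 * selector thunks arise from lazy field selection, e.g. in Haskell
 *
 *     let x1 = fst p; x2 = fst x1; x3 = fst x2; ...
 *
 * eval_thunk_selector() below recurses into the selectee, so without a
 * depth bound a long enough chain of such thunks could overflow the C
 * stack during GC; past MAX_THUNK_SELECTOR_DEPTH we simply copy the
 * thunk instead of evaluating it.
 */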
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static bdescr *     gc_alloc_block          ( step *stp );
static void         mark_root               ( StgClosure **root );

// Use a register argument for evacuate, if available.
#if defined(__GNUC__)
static StgClosure * evacuate (StgClosure *q) __attribute__((regparm(1)));
#else
static StgClosure * evacuate (StgClosure *q);
#endif

static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         scavenge_large_bitmap   ( StgPtr p,
					      StgLargeBitmap *large_bitmap,
					      nat size );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
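
/* The matching pop operation (a sketch of the elided counterpart used
 * by scavenge_mark_stack()): the stack grows upwards from mark_stack,
 * so pushing and popping move mark_sp in opposite directions.  Callers
 * of push_mark_stack() are expected to check mark_stack_full() first
 * and fall back to setting mark_stack_overflowed, as evacuate() does.
 */
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}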
/* -----------------------------------------------------------------------------
   Allocate a new to-space block in the given step.
   -------------------------------------------------------------------------- */

static bdescr *
gc_alloc_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step   = stp;
    bd->link   = NULL;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
	bd->flags = BF_EVACUATED;
    } else {
	bd->flags = 0;
    }

    // Start a new to-space block, chain it on after the previous one.
    if (stp->hp_bd == NULL) {
	stp->hp_bd = bd;
    } else {
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = bd;
	stp->hp_bd = bd;
    }

    stp->hp    = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;
    stp->n_to_blocks++;
    new_blocks++;
    return bd;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list and mut_once_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
	 highest step in this generation or the first step in the next
	 generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
	 When we evacuate an object we attempt to evacuate
	 everything it points to into the same generation - this is
	 achieved by setting evac_gen to the desired generation.  If
	 we can't do this, then an entry in the mut_once list has to
	 be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
	 it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: sched_mutex

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
    bdescr *bd;
    step *stp;
    lnat live, allocated, collected = 0, copied = 0;
    lnat oldgen_saved_blocks = 0;
    nat g, s;

#ifdef PROFILING
    CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
    IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		       Now, Now));
#endif

#if defined(RTS_USER_SIGNALS)
    // block signals
    blockUserSignals();
#endif

    // tell the stats department that we've started a GC
    stat_startGC();

    // Init stats and print par specific (timing) info
    PAR_TICKY_PAR_START();

    // attribute any costs to CCS_GC
#ifdef PROFILING
    prev_CCS = CCCS;
    CCCS = CCS_GC;
#endif

    /* Approximate how much we allocated.
     * Todo: only when generating stats?
     */
    allocated = calcAllocated();
    /* Figure out which generation to collect
     */
    if (force_major_gc) {
	N = RtsFlags.GcFlags.generations - 1;
	major_gc = rtsTrue;
    } else {
	N = 0;
	for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	    if (generations[g].steps[0].n_blocks +
		generations[g].steps[0].n_large_blocks
		>= generations[g].max_blocks) {
		N = g;
	    }
	}
	major_gc = (N == RtsFlags.GcFlags.generations-1);
    }
#ifdef RTS_GTK_FRONTPANEL
    if (RtsFlags.GcFlags.frontpanel) {
	updateFrontPanelBeforeGC(N);
    }
#endif

    // check stack sanity *before* GC (ToDo: check all threads)
    // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
    IF_DEBUG(sanity, checkFreeListSanity());
    /* Initialise the static object lists
     */
    static_objects = END_OF_STATIC_LIST;
    scavenged_static_objects = END_OF_STATIC_LIST;

    /* zero the mutable list for the oldest generation (see comment by
     * zero_mutable_list below).
     */
    if (major_gc) {
	zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
    }
    /* Save the old to-space if we're doing a two-space collection
     */
    if (RtsFlags.GcFlags.generations == 1) {
	old_to_blocks = g0s0->to_blocks;
	g0s0->to_blocks = NULL;
	g0s0->n_to_blocks = 0;
    }

    /* Keep a count of how many new blocks we allocated during this GC
     * (used for resizing the allocation area, later).
     */
    new_blocks = 0;
    // Initialise to-space in all the generations/steps that we're
    // collecting.
    //
    for (g = 0; g <= N; g++) {
	generations[g].mut_once_list = END_MUT_LIST;
	generations[g].mut_list = END_MUT_LIST;

	for (s = 0; s < generations[g].n_steps; s++) {

	    // generation 0, step 0 doesn't need to-space
	    if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
		continue;
	    }

	    stp = &generations[g].steps[s];
	    ASSERT(stp->gen_no == g);

	    // start a new to-space for this step.
	    stp->hp        = NULL;
	    stp->hp_bd     = NULL;
	    stp->to_blocks = NULL;

	    // allocate the first to-space block; extra blocks will be
	    // chained on as necessary.
	    bd = gc_alloc_block(stp);
	    stp->to_blocks = bd;
	    stp->scan      = bd->start;
	    stp->scan_bd   = bd;

	    // initialise the large object queues.
	    stp->new_large_objects = NULL;
	    stp->scavenged_large_objects = NULL;
	    stp->n_scavenged_large_blocks = 0;

	    // mark the large objects as not evacuated yet
	    for (bd = stp->large_objects; bd; bd = bd->link) {
		bd->flags = BF_LARGE;
	    }

	    // for a compacted step, we need to allocate the bitmap
	    if (stp->is_compacted) {
		nat bitmap_size; // in bytes
		bdescr *bitmap_bdescr;
		StgWord *bitmap;

		bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

		if (bitmap_size > 0) {
		    bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					       / BLOCK_SIZE);
		    stp->bitmap = bitmap_bdescr;
		    bitmap = bitmap_bdescr->start;

		    IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				       bitmap_size, bitmap););

		    // don't forget to fill it with zeros!
		    memset(bitmap, 0, bitmap_size);

		    // for each block in this step, point to its bitmap from the
		    // block descriptor.
		    for (bd=stp->blocks; bd != NULL; bd = bd->link) {
			bd->u.bitmap = bitmap;
			bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
		    }
		}
	    }
	}
    }
    /* make sure the older generations have at least one block to
     * allocate into (this makes things easier for copy(), see below).
     */
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
	for (s = 0; s < generations[g].n_steps; s++) {
	    stp = &generations[g].steps[s];
	    if (stp->hp_bd == NULL) {
		ASSERT(stp->blocks == NULL);
		bd = gc_alloc_block(stp);
		stp->blocks = bd;
		stp->n_blocks = 1;
	    }
	    /* Set the scan pointer for older generations: remember we
	     * still have to scavenge objects that have been promoted. */
	    stp->scan = stp->hp;
	    stp->scan_bd = stp->hp_bd;
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	    stp->new_large_objects = NULL;
	    stp->scavenged_large_objects = NULL;
	    stp->n_scavenged_large_blocks = 0;
	}
    }
    /* Allocate a mark stack if we're doing a major collection.
     */
    if (major_gc) {
	mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
	mark_stack = (StgPtr *)mark_stack_bdescr->start;
	mark_sp = mark_stack;
	mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
    } else {
	mark_stack_bdescr = NULL;
    }
    /* -----------------------------------------------------------------------
     * follow all the roots that we know about:
     *   - mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also: do them in reverse generation order.  This is because we
     * often want to promote objects that are pointed to by older
     * generations early, so we don't have to repeatedly copy them.
     * Doing the generations in reverse order ensures that we don't end
     * up in the situation where we want to evac an object to gen 3 and
     * it has already been evaced to gen 2.
     */
    {
	int st;
	for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
	    generations[g].saved_mut_list = generations[g].mut_list;
	    generations[g].mut_list = END_MUT_LIST;
	}

	// Do the mut-once lists first
	for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
	    IF_PAR_DEBUG(verbose,
			 printMutOnceList(&generations[g]));
	    scavenge_mut_once_list(&generations[g]);
	    evac_gen = g;
	    for (st = generations[g].n_steps-1; st >= 0; st--) {
		scavenge(&generations[g].steps[st]);
	    }
	}

	for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
	    IF_PAR_DEBUG(verbose,
			 printMutableList(&generations[g]));
	    scavenge_mutable_list(&generations[g]);
	    evac_gen = g;
	    for (st = generations[g].n_steps-1; st >= 0; st--) {
		scavenge(&generations[g].steps[st]);
	    }
	}
    }
    /* follow roots from the CAF list (used by GHCi)
     */
    evac_gen = 0;
    markCAFs(mark_root);

    /* follow all the roots that the application knows about.
     */
    evac_gen = 0;
    get_roots(mark_root);
    /* And don't forget to mark the TSO if we got here direct from
     * Haskell! */
    /* Not needed in a seq version?
    if (CurrentTSO) {
	CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
    }
    */

#if defined(PAR)
    // Mark the entries in the GALA table of the parallel system
    markLocalGAs(major_gc);
    // Mark all entries on the list of pending fetches
    markPendingFetches(major_gc);
#endif
    /* Mark the weak pointer list, and prepare to detect dead weak
     * pointers.
     */
    mark_weak_ptr_list(&weak_ptr_list);
    old_weak_ptr_list = weak_ptr_list;
    weak_ptr_list = NULL;
    weak_stage = WeakPtrs;
    /* The all_threads list is like the weak_ptr_list.
     * See traverse_weak_ptr_list() for the details.
     */
    old_all_threads = all_threads;
    all_threads = END_TSO_QUEUE;
    resurrected_threads = END_TSO_QUEUE;
    /* Mark the stable pointer table.
     */
    markStablePtrTable(mark_root);
#ifdef INTERPRETER
    {
	/* ToDo: To fix the caf leak, we need to make the commented out
	 * parts of this code do something sensible - as described in
	 * the CAF document.
	 */
	extern void markHugsObjects(void);
	markHugsObjects();
    }
#endif
    /* -------------------------------------------------------------------------
     * Repeatedly scavenge all the areas we know about until there's no
     * more scavenging to be done.
     */
    {
	rtsBool flag;
    loop:
	flag = rtsFalse;

	// scavenge static objects
	if (major_gc && static_objects != END_OF_STATIC_LIST) {
	    IF_DEBUG(sanity, checkStaticObjects(static_objects));
	    scavenge_static();
	}
	/* When scavenging the older generations:  Objects may have been
	 * evacuated from generations <= N into older generations, and we
	 * need to scavenge these objects.  We're going to try to ensure that
	 * any evacuations that occur move the objects into at least the
	 * same generation as the object being scavenged, otherwise we
	 * have to create new entries on the mutable list for the older
	 * generation.
	 */

	// scavenge each step in generations 0..maxgen
	{
	    long gen;
	    int st;

	loop2:
	    // scavenge objects in compacted generation
	    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
		(mark_stack_bdescr != NULL && !mark_stack_empty())) {
		scavenge_mark_stack();
		flag = rtsTrue;
	    }

	    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
		for (st = generations[gen].n_steps; --st >= 0; ) {
		    if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
			continue;
		    }
		    stp = &generations[gen].steps[st];
		    evac_gen = gen;
		    if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
			scavenge(stp);
			flag = rtsTrue;
			goto loop2;
		    }
		    if (stp->new_large_objects != NULL) {
			scavenge_large(stp);
			flag = rtsTrue;
			goto loop2;
		    }
		}
	    }
	}

	if (flag) { goto loop; }
	// must be last...  invariant is that everything is fully
	// scavenged at this point.
	if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
	    goto loop;
	}
    }
    /* Update the pointers from the "main thread" list - these are
     * treated as weak pointers because we want to allow a main thread
     * to get a BlockedOnDeadMVar exception in the same way as any other
     * thread.  Note that the threads should all have been retained by
     * GC by virtue of being on the all_threads list, we're just
     * updating pointers here.
     */
    {
	StgMainThread *m;
	StgTSO *tso;
	for (m = main_threads; m != NULL; m = m->link) {
	    tso = (StgTSO *) isAlive((StgClosure *)m->tso);
	    if (tso == NULL) {
		barf("main thread has been GC'd");
	    }
	    m->tso = tso;
	}
    }
#if defined(PAR)
    // Reconstruct the Global Address tables used in GUM
    rebuildGAtables(major_gc);
    IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

    // Now see which stable names are still alive.
    gcStablePtrTable();
    // Tidy the end of the to-space chains
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
	for (s = 0; s < generations[g].n_steps; s++) {
	    stp = &generations[g].steps[s];
	    if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
		ASSERT(Bdescr(stp->hp) == stp->hp_bd);
		stp->hp_bd->free = stp->hp;
	    }
	}
    }
#ifdef PROFILING
    // We call processHeapClosureForDead() on every closure destroyed during
    // the current garbage collection, so we invoke LdvCensusForDead().
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
	|| RtsFlags.ProfFlags.bioSelector != NULL)
	LdvCensusForDead(N);
#endif
    // NO MORE EVACUATION AFTER THIS POINT!
    // Finally: compaction of the oldest generation.
    if (major_gc && oldest_gen->steps[0].is_compacted) {
	// save number of blocks for stats
	oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
	compact(get_roots);
    }

    IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
    /* run through all the generations/steps and tidy up
     */
    copied = new_blocks * BLOCK_SIZE_W;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

	if (g <= N) {
	    generations[g].collections++; // for stats
	}

	for (s = 0; s < generations[g].n_steps; s++) {
	    bdescr *next;
	    stp = &generations[g].steps[s];

	    if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
		// stats information: how much we copied
		if (g <= N) {
		    copied -= stp->hp_bd->start + BLOCK_SIZE_W -
			stp->hp_bd->free;
		}
	    }

	    // for generations we collected...
	    if (g <= N) {

		// rough calculation of garbage collected, for stats output
		if (stp->is_compacted) {
		    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
		} else {
		    collected += stp->n_blocks * BLOCK_SIZE_W;
		}
		/* free old memory and shift to-space into from-space for all
		 * the collected steps (except the allocation area).  These
		 * freed blocks will probably be quickly recycled.
		 */
		if (!(g == 0 && s == 0)) {
		    if (stp->is_compacted) {
			// for a compacted step, just shift the new to-space
			// onto the front of the now-compacted existing blocks.
			for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
			    bd->flags &= ~BF_EVACUATED;	// now from-space
			}
			// tack the new blocks on the end of the existing blocks
			if (stp->blocks == NULL) {
			    stp->blocks = stp->to_blocks;
			} else {
			    for (bd = stp->blocks; bd != NULL; bd = next) {
				next = bd->link;
				if (next == NULL) {
				    bd->link = stp->to_blocks;
				}
			    }
			}
			// add the new blocks to the block tally
			stp->n_blocks += stp->n_to_blocks;
		    } else {
			freeChain(stp->blocks);
			stp->blocks = stp->to_blocks;
			stp->n_blocks = stp->n_to_blocks;
			for (bd = stp->blocks; bd != NULL; bd = bd->link) {
			    bd->flags &= ~BF_EVACUATED;	// now from-space
			}
		    }
		    stp->to_blocks = NULL;
		    stp->n_to_blocks = 0;
		}
		/* LARGE OBJECTS.  The current live large objects are chained on
		 * scavenged_large, having been moved during garbage
		 * collection from large_objects.  Any objects left on
		 * large_objects list are therefore dead, so we free them here.
		 */
		for (bd = stp->large_objects; bd != NULL; bd = next) {
		    next = bd->link;
		    freeGroup(bd);
		}

		// update the count of blocks used by large objects
		for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;
		}
		stp->large_objects = stp->scavenged_large_objects;
		stp->n_large_blocks = stp->n_scavenged_large_blocks;

	    } else {
		// for older generations...

		/* For older generations, we need to append the
		 * scavenged_large_object list (i.e. large objects that have been
		 * promoted during this GC) to the large_object list for that step.
		 */
		for (bd = stp->scavenged_large_objects; bd; bd = next) {
		    next = bd->link;
		    bd->flags &= ~BF_EVACUATED;
		    dbl_link_onto(bd, &stp->large_objects);
		}

		// add the new blocks we promoted during this GC
		stp->n_blocks += stp->n_to_blocks;
		stp->n_to_blocks = 0;
		stp->n_large_blocks += stp->n_scavenged_large_blocks;
	    }
	}
    }
    /* Reset the sizes of the older generations when we do a major
     * collection.
     *
     * CURRENT STRATEGY: make all generations except zero the same size.
     * We have to stay within the maximum heap size, and leave a certain
     * percentage of the maximum heap size available to allocate into.
     */
    if (major_gc && RtsFlags.GcFlags.generations > 1) {
	nat live, size, min_alloc;
	nat max  = RtsFlags.GcFlags.maxHeapSize;
	nat gens = RtsFlags.GcFlags.generations;

	// live in the oldest generations
	live = oldest_gen->steps[0].n_blocks +
	       oldest_gen->steps[0].n_large_blocks;

	// default max size for all generations except zero
	size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		       RtsFlags.GcFlags.minOldGenSize);

	// minimum size for generation zero
	min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			    RtsFlags.GcFlags.minAllocAreaSize);

	// Auto-enable compaction when the residency reaches a
	// certain percentage of the maximum heap size (default: 30%).
	if (RtsFlags.GcFlags.generations > 1 &&
	    (RtsFlags.GcFlags.compact ||
	     (max > 0 &&
	      oldest_gen->steps[0].n_blocks >
	      (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	    oldest_gen->steps[0].is_compacted = 1;
//	    fprintf(stderr,"compaction: on\n", live);
	} else {
	    oldest_gen->steps[0].is_compacted = 0;
//	    fprintf(stderr,"compaction: off\n", live);
	}
	// if we're going to go over the maximum heap size, reduce the
	// size of the generations accordingly.  The calculation is
	// different if compaction is turned on, because we don't need
	// to double the space required to collect the old generation.
	if (max != 0) {

	    // this test is necessary to ensure that the calculations
	    // below don't have any negative results - we're working
	    // with unsigned values here.
	    if (max < min_alloc) {
		heapOverflow();
	    }

	    if (oldest_gen->steps[0].is_compacted) {
		if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		    size = (max - min_alloc) / ((gens - 1) * 2 - 1);
		}
	    } else {
		if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		    size = (max - min_alloc) / ((gens - 1) * 2);
		}
	    }

	    if (size < live) {
		heapOverflow();
	    }
	}
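
	/* Worked example (illustrative numbers, not from a real run):
	 * with compaction off, total demand is size*(gens-1)*2 + min_alloc
	 * blocks - each old generation may need a same-sized to-space
	 * during collection, hence the factor of 2.  With gens = 2,
	 * max = 1000 and min_alloc = 100, size is clamped to
	 * (1000 - 100) / (1 * 2) = 450 blocks.  With compaction on, the
	 * oldest generation is compacted in place instead of copied, so
	 * one to-space copy is not needed and the divisor drops to
	 * (gens - 1) * 2 - 1.
	 */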
#if 0
	fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
		min_alloc, size, max);
#endif

	for (g = 0; g < gens; g++) {
	    generations[g].max_blocks = size;
	}
    }
    // Guess the amount of live data for stats.
    live = calcLive();

    /* Free the small objects allocated via allocate(), since this will
     * all have been copied into G0S1 now.
     */
    if (small_alloc_list != NULL) {
	freeChain(small_alloc_list);
    }
    small_alloc_list = NULL;
    alloc_blocks = 0;
    alloc_Hp = NULL;
    alloc_HpLim = NULL;
    alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

    // Start a new pinned_object_block
    pinned_object_block = NULL;
    /* Free the mark stack.
     */
    if (mark_stack_bdescr != NULL) {
	freeGroup(mark_stack_bdescr);
    }

    /* Free any bitmaps.
     */
    for (g = 0; g <= N; g++) {
	for (s = 0; s < generations[g].n_steps; s++) {
	    stp = &generations[g].steps[s];
	    if (stp->is_compacted && stp->bitmap != NULL) {
		freeGroup(stp->bitmap);
	    }
	}
    }
    /* Two-space collector:
     * Free the old to-space, and estimate the amount of live data.
     */
    if (RtsFlags.GcFlags.generations == 1) {
	nat blocks;

	if (old_to_blocks != NULL) {
	    freeChain(old_to_blocks);
	}
	for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
	    bd->flags = 0;	// now from-space
	}
	/* For a two-space collector, we need to resize the nursery. */

	/* set up a new nursery.  Allocate a nursery size based on a
	 * function of the amount of live data (by default a factor of 2)
	 * Use the blocks from the old nursery if possible, freeing up any
	 * left over blocks.
	 *
	 * If we get near the maximum heap size, then adjust our nursery
	 * size accordingly.  If the nursery is the same size as the live
	 * data (L), then we need 3L bytes.  We can reduce the size of the
	 * nursery to bring the required memory down near 2L bytes.
	 *
	 * A normal 2-space collector would need 4L bytes to give the same
	 * performance we get from 3L bytes, reducing to the same
	 * performance at 2L bytes.
	 */
	blocks = g0s0->n_to_blocks;

	if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	     blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	     RtsFlags.GcFlags.maxHeapSize ) {
	    long adjusted_blocks;  // signed on purpose
	    int pc_free;

	    adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
	    IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
	    pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
	    if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
		heapOverflow();
	    }
	    blocks = adjusted_blocks;
	} else {
	    blocks *= RtsFlags.GcFlags.oldGenFactor;
	    if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
		blocks = RtsFlags.GcFlags.minAllocAreaSize;
	    }
	}
	resizeNursery(blocks);
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    } else {
	if (RtsFlags.GcFlags.heapSizeSuggestion) {
	    long blocks;
	    nat needed = calcNeeded(); 	// approx blocks needed at next GC

	    /* Guess how much will be live in generation 0 step 0 next time.
	     * A good approximation is obtained by finding the
	     * percentage of g0s0 that was live at the last minor GC.
	     */
	    if (N == 0) {
		g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
	    }

	    /* Estimate a size for the allocation area based on the
	     * information available.  We might end up going slightly under
	     * or over the suggested heap size, but we should be pretty
	     * close on average.
	     *
	     * Formula:            suggested - needed
	     *                ----------------------------
	     *                    1 + g0s0_pcnt_kept/100
	     *
	     * where 'needed' is the amount of memory needed at the next
	     * collection for collecting all steps except g0s0.
	     */
	    blocks =
		(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
		(100 + (long)g0s0_pcnt_kept);
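
	    /* Worked example (illustrative numbers only): with a suggested
	     * heap of 1000 blocks, needed = 200 and g0s0_pcnt_kept = 30,
	     * this gives blocks = (1000 - 200) * 100 / 130 ~= 615.  We then
	     * expect roughly 615 * 30% ~= 185 blocks of g0s0 to survive the
	     * next minor GC, and 615 + 185 + 200 stays near the 1000-block
	     * suggestion.
	     */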
	    if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
		blocks = RtsFlags.GcFlags.minAllocAreaSize;
	    }

	    resizeNursery((nat)blocks);

	} else {
	    // we might have added extra large blocks to the nursery, so
	    // resize back to minAllocAreaSize again.
	    resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
	}
    }
    // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
    if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
    // resetStaticObjectForRetainerProfiling() must be called before
    // zeroing below.
    resetStaticObjectForRetainerProfiling();
#endif

    // zero the scavenged static object list
    if (major_gc) {
	zero_static_object_list(scavenged_static_objects);
    }
    // Reset the nursery
    resetNurseries();

    RELEASE_LOCK(&sched_mutex);

    // start any pending finalizers
    scheduleFinalizers(old_weak_ptr_list);

    // send exceptions to any threads which were about to die
    resurrectThreads(resurrected_threads);

    ACQUIRE_LOCK(&sched_mutex);

    // Update the stable pointer hash table.
    updateStablePtrTable(major_gc);

    // check sanity after GC
    IF_DEBUG(sanity, checkSanity());
    // extra GC trace info
    IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
    // symbol-table based profiling
    /*  heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
    // restore enclosing cost centre
    CCCS = prev_CCS;
#endif

    // check for memory leaks if sanity checking is on
    IF_DEBUG(sanity, memInventory());
#ifdef RTS_GTK_FRONTPANEL
    if (RtsFlags.GcFlags.frontpanel) {
	updateFrontPanelAfterGC( N, live );
    }
#endif

    // ok, GC over: tell the stats department what happened.
    stat_endGC(allocated, collected, live, copied, N);

#if defined(RTS_USER_SIGNALS)
    // unblock signals again
    unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
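
/* A sketch of the stage transitions implemented below (summary only):
 *
 *     WeakPtrs    --(a pass finds no new live keys)-->  WeakThreads
 *     WeakThreads --(dead threads resurrected)------->  WeakDone
 *     WeakDone    --> no further work (returns rtsFalse)
 *
 * Each transition requests one more round of scavenging by returning
 * rtsTrue, preserving the invariant that the heap is fully scavenged
 * before the next stage runs.
 */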
static rtsBool
traverse_weak_ptr_list(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
	return rtsFalse;

    case WeakPtrs:
	/* doesn't matter where we evacuate values/finalizers to, since
	 * these pointers are treated as roots (iff the keys are alive).
	 */
	evac_gen = 0;

	last_w = &old_weak_ptr_list;
	for (w = old_weak_ptr_list; w != NULL; w = next_w) {

	    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
	     * called on a live weak pointer object.  Just remove it.
	     */
	    if (w->header.info == &stg_DEAD_WEAK_info) {
		next_w = ((StgDeadWeak *)w)->link;
		*last_w = next_w;
		continue;
	    }

	    switch (get_itbl(w)->type) {

	    case EVACUATED:
		next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
		*last_w = next_w;
		continue;
	    case WEAK:
		/* Now, check whether the key is reachable.
		 */
		new = isAlive(w->key);
		if (new != NULL) {
		    w->key = new;
		    // evacuate the value and finalizer
		    w->value = evacuate(w->value);
		    w->finalizer = evacuate(w->finalizer);
		    // remove this weak ptr from the old_weak_ptr list
		    *last_w = w->link;
		    next_w = w->link;

		    // and put it on the new weak ptr list
		    w->link = weak_ptr_list;
		    weak_ptr_list = w;
		    flag = rtsTrue;

		    IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
					 w, w->key));
		    continue;
		}
		else {
		    last_w = &(w->link);
		    next_w = w->link;
		    continue;
		}

	    default:
		barf("traverse_weak_ptr_list: not WEAK");
	    }
	}
	/* If we didn't make any changes, then we can go round and kill all
	 * the dead weak pointers.  The old_weak_ptr list is used as a list
	 * of pending finalizers later on.
	 */
	if (flag == rtsFalse) {
	    for (w = old_weak_ptr_list; w; w = w->link) {
		w->finalizer = evacuate(w->finalizer);
	    }

	    // Next, move to the WeakThreads stage after fully
	    // scavenging the finalizers we've just evacuated.
	    weak_stage = WeakThreads;
	}

	return rtsTrue;
    case WeakThreads:
	/* Now deal with the all_threads list, which behaves somewhat like
	 * the weak ptr list.  If we discover any threads that are about to
	 * become garbage, we wake them up and administer an exception.
	 */
	{
	    StgTSO *t, *tmp, *next, **prev;

	    prev = &old_all_threads;
	    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

		(StgClosure *)tmp = isAlive((StgClosure *)t);

		if (tmp != NULL) {
		    t = tmp;
		}

		ASSERT(get_itbl(t)->type == TSO);
		switch (t->what_next) {
		case ThreadRelocated:
		    next = t->link;
		    *prev = next;
		    continue;
		case ThreadKilled:
		case ThreadComplete:
		    // finished or died.  The thread might still be alive, but we
		    // don't keep it on the all_threads list.  Don't forget to
		    // stub out its global_link field.
		    next = t->global_link;
		    t->global_link = END_TSO_QUEUE;
		    *prev = next;
		    continue;
		default:
		    ;
		}

		if (tmp == NULL) {
		    // not alive (yet): leave this thread on the
		    // old_all_threads list.
		    prev = &(t->global_link);
		    next = t->global_link;
		}
		else {
		    // alive: move this thread onto the all_threads list.
		    next = t->global_link;
		    t->global_link = all_threads;
		    all_threads = t;
		    *prev = next;
		}
	    }
	}
	/* And resurrect any threads which were about to become garbage.
	 */
	{
	    StgTSO *t, *tmp, *next;
	    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
		next = t->global_link;
		(StgClosure *)tmp = evacuate((StgClosure *)t);
		tmp->global_link = resurrected_threads;
		resurrected_threads = tmp;
	    }
	}

	weak_stage = WeakDone;  // *now* we're done,
	return rtsTrue;         // but one more round of scavenging, please

    default:
	barf("traverse_weak_ptr_list");
	return rtsTrue;
    }
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
    StgWeak *w, **last_w;

    last_w = list;
    for (w = *list; w; w = w->link) {
	// w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
	ASSERT(w->header.info == &stg_DEAD_WEAK_info
	       || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
	(StgClosure *)w = evacuate((StgClosure *)w);
	*last_w = w;
	last_w = &(w->link);
    }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
    const StgInfoTable *info;
    bdescr *bd;

    while (1) {
	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
	info = get_itbl(p);

	// ignore static closures
	//
	// ToDo: for static closures, check the static link field.
	// Problem here is that we sometimes don't set the link field, eg.
	// for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
	//
	if (!HEAP_ALLOCED(p)) { return p; }

	// ignore closures in generations that we're not collecting.
	bd = Bdescr((P_)p);
	if (bd->gen_no > N) { return p; }

	// if it's a pointer into to-space, then we're done
	if (bd->flags & BF_EVACUATED) { return p; }

	// large objects use the evacuated flag
	if (bd->flags & BF_LARGE) { return NULL; }

	// check the mark bit for compacted steps
	if (bd->step->is_compacted && is_marked((P_)p,bd)) { return p; }

	switch (info->type) {
	case IND:
	case IND_STATIC:
	case IND_PERM:
	case IND_OLDGEN:	// rely on compatible layout with StgInd
	case IND_OLDGEN_PERM:
	    // follow indirections
	    p = ((StgInd *)p)->indirectee;
	    continue;
	case EVACUATED:
	    // alive!
	    return ((StgEvacuated *)p)->evacuee;
	case TSO:
	    if (((StgTSO *)p)->what_next == ThreadRelocated) {
		p = (StgClosure *)((StgTSO *)p)->link;
		continue;
	    }
	    // fall through
	default:
	    // dead.
	    return NULL;
	}
    }
}
static void
mark_root(StgClosure **root)
{
    *root = evacuate(*root);
}

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // Source object must be in from-space:
    ASSERT((Bdescr((P_)p)->flags & BF_EVACUATED) == 0);
    // not true: (ToDo: perhaps it should be)
    // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    p->header.info = &stg_EVACUATED_info;
    ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
    P_ to, from, dest;
#ifdef PROFILING
    // @LDV profiling
    nat size_org = size;
#endif

    TICK_GC_WORDS_COPIED(size);
    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
	failed_to_evac = rtsTrue;
#else
	stp = &generations[evac_gen].steps[0];
#endif
    }

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    if (stp->hp + size >= stp->hpLim) {
	gc_alloc_block(stp);
    }

    for (to = stp->hp, from = (P_)src; size>0; --size) {
	*to++ = *from++;
    }

    dest = stp->hp;
    stp->hp = to;
    upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
    return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    P_ dest, to, from;
#ifdef PROFILING
    // @LDV profiling
    nat size_to_copy_org = size_to_copy;
#endif

    TICK_GC_WORDS_COPIED(size_to_copy);
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
	failed_to_evac = rtsTrue;
#else
	stp = &generations[evac_gen].steps[0];
#endif
    }

    if (stp->hp + size_to_reserve >= stp->hpLim) {
	gc_alloc_block(stp);
    }

    for (to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
	*to++ = *from++;
    }

    dest = stp->hp;
    stp->hp += size_to_reserve;
    upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    // size_to_copy_org is wrong because the closure already occupies size_to_reserve
    // words.
    SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy_org > 0)
	FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
    return (StgClosure *)dest;
}
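
/* For example (as in evacuate() below), a BLACKHOLE is evacuated with
 *
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * only the header is copied, but the full BLACKHOLE_sizeW() words are
 * reserved in to-space so the closure can later be updated in place.
 */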
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    step *stp;

    // object must be at the beginning of the block (or be a ByteArray)
    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	   (((W_)p & BLOCK_MASK) == 0));

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
	/* Don't forget to set the failed_to_evac flag if we didn't get
	 * the desired destination (see comments in evacuate()).
	 */
	if (bd->gen_no < evac_gen) {
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return;
    }

    stp = bd->step;
    // remove from large_object list
    if (bd->u.back) {
	bd->u.back->link = bd->link;
    } else { // first object in the list
	stp->large_objects = bd->link;
    }
    if (bd->link) {
	bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated large object list of the destination step
     */
    stp = bd->step->to;
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
	failed_to_evac = rtsTrue;
#else
	stp = &generations[evac_gen].steps[0];
#endif
    }

    bd->step = stp;
    bd->gen_no = stp->gen_no;
    bd->link = stp->new_large_objects;
    stp->new_large_objects = bd;
    bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
    StgMutVar *q;
    step *stp;

    stp = &gen->steps[0];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
	gc_alloc_block(stp);
    }

    q = (StgMutVar *)stp->hp;
    stp->hp += sizeofW(StgMutVar);

    SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
    q->var = ptr;
    recordOldToNewPtrs((StgMutClosure *)q);

    return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.


   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
    StgClosure *to;
    bdescr *bd = NULL;
    step *stp;
    const StgInfoTable *info;

loop:
    if (HEAP_ALLOCED(q)) {
	bd = Bdescr((P_)q);

	if (bd->gen_no > N) {
	    /* Can't evacuate this object, because it's in a generation
	     * older than the ones we're collecting.  Let's hope that it's
	     * in evac_gen or older, or we will have to arrange to track
	     * this pointer using the mutable list.
	     */
	    if (bd->gen_no < evac_gen) {
		// nope
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	    }
	    return q;
	}

	/* evacuate large objects by re-linking them onto a different list.
	 */
	if (bd->flags & BF_LARGE) {
	    info = get_itbl(q);
	    if (info->type == TSO &&
		((StgTSO *)q)->what_next == ThreadRelocated) {
		q = (StgClosure *)((StgTSO *)q)->link;
		goto loop;
	    }
	    evacuate_large((P_)q);
	    return q;
	}

	/* If the object is in a step that we're compacting, then we
	 * need to use an alternative evacuate procedure.
	 */
	if (bd->step->is_compacted) {
	    if (!is_marked((P_)q,bd)) {
		mark((P_)q,bd);
		if (mark_stack_full()) {
		    mark_stack_overflowed = rtsTrue;
		    reset_mark_stack();
		}
		push_mark_stack((P_)q);
	    }
	    return q;
	}

	stp = bd->step->to;
    }
#ifdef DEBUG
    else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

    // make sure the info pointer is into text space
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
    info = get_itbl(q);

    switch (info -> type) {
    case MUT_VAR:
    case MVAR:
	return copy(q,sizeW_fromITBL(info),stp);

    case CONSTR_0_1:
    {
	StgWord w = (StgWord)q->payload[0];
	if (q->header.info == Czh_con_info &&
	    // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	    (StgChar)w <= MAX_CHARLIKE) {
	    return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
	}
	if (q->header.info == Izh_con_info &&
	    (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	    return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
	}
	// else, fall through ...
    }

    case FUN_1_0:
    case FUN_0_1:
    case CONSTR_1_0:
	return copy(q,sizeofW(StgHeader)+1,stp);
    case THUNK_1_0:		// here because of MIN_UPD_SIZE
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
	if (bd->gen_no == 0 &&
	    bd->step->no != 0 &&
	    bd->step->no == generations[bd->gen_no].n_steps-1) {
	    stp = bd->step;
	}
#endif
	return copy(q,sizeofW(StgHeader)+2,stp);

    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
	return copy(q,sizeofW(StgHeader)+2,stp);

    case FUN:
    case THUNK:
    case CONSTR:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
	return copy(q,sizeW_fromITBL(info),stp);

    case BCO:
	return copy(q,bco_sizeW((StgBCO *)q),stp);

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

    case BLACKHOLE_BQ:
	to = copy(q,BLACKHOLE_sizeW(),stp);
	recordMutable((StgMutClosure *)to);
	return to;
    case THUNK_SELECTOR:
    {
	StgClosure *p;

	if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
	    return copy(q,THUNK_SELECTOR_sizeW(),stp);
	}

	p = eval_thunk_selector(info->layout.selector_offset,
				(StgSelector *)q);

	if (p == NULL) {
	    return copy(q,THUNK_SELECTOR_sizeW(),stp);
	} else {
	    // q is still BLACKHOLE'd.
	    thunk_selector_depth++;
	    p = evacuate(p);
	    thunk_selector_depth--;
	    upd_evacuee(q,p);
#ifdef PROFILING
	    // We store the size of the just evacuated object in the
	    // LDV word so that the profiler can guess the position of
	    // the next object later.
	    SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
#endif
	    return p;
	}
    }

    case IND:
    case IND_OLDGEN:
	// follow chains of indirections, don't evacuate them
	q = ((StgInd*)q)->indirectee;
	goto loop;
    case THUNK_STATIC:
	if (info->srt_bitmap != 0 && major_gc &&
	    THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
	    THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
	    static_objects = (StgClosure *)q;
	}
	return q;

    case FUN_STATIC:
	if (info->srt_bitmap != 0 && major_gc &&
	    FUN_STATIC_LINK((StgClosure *)q) == NULL) {
	    FUN_STATIC_LINK((StgClosure *)q) = static_objects;
	    static_objects = (StgClosure *)q;
	}
	return q;

    case IND_STATIC:
	/* If q->saved_info != NULL, then it's a revertible CAF - it'll be
	 * on the CAF list, so don't do anything with it here (we'll
	 * scavenge it later).
	 */
	if (major_gc
	    && ((StgIndStatic *)q)->saved_info == NULL
	    && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	    IND_STATIC_LINK((StgClosure *)q) = static_objects;
	    static_objects = (StgClosure *)q;
	}
	return q;

    case CONSTR_STATIC:
	if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
	    STATIC_LINK(info,(StgClosure *)q) = static_objects;
	    static_objects = (StgClosure *)q;
	}
	return q;

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_NOCAF_STATIC:
	/* no need to put these on the static linked list, they don't need
	 * to be scavenged.
	 */
	return q;
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
	// shouldn't see these
	barf("evacuate: stack frame at %p\n", q);
    case PAP:
    case AP:
	return copy(q,pap_sizeW((StgPAP*)q),stp);

    case AP_STACK:
	return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
    case EVACUATED:
	/* Already evacuated, just return the forwarding address.
	 * HOWEVER: if the requested destination generation (evac_gen) is
	 * older than the actual generation (because the object was
	 * already evacuated to a younger generation) then we have to
	 * set the failed_to_evac flag to indicate that we couldn't
	 * manage to promote the object to the desired generation.
	 */
	if (evac_gen > 0) {		// optimisation
	    StgClosure *p = ((StgEvacuated*)q)->evacuee;
	    if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	    }
	}
	return ((StgEvacuated*)q)->evacuee;
    case ARR_WORDS:
	// just copy the block
	return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
	// just copy the block
	return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
    case TSO:
    {
	StgTSO *tso = (StgTSO *)q;

	/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
	 */
	if (tso->what_next == ThreadRelocated) {
	    q = (StgClosure *)tso->link;
	    goto loop;
	}

	/* To evacuate a small TSO, we need to relocate the update frame
	 * list it contains.
	 */
	{
	    StgTSO *new_tso;
	    StgPtr p, q;

	    new_tso = (StgTSO *)copyPart((StgClosure *)tso,
					 tso_sizeW(tso),
					 sizeofW(StgTSO), stp);
	    move_TSO(tso, new_tso);
	    for (p = tso->sp, q = new_tso->sp;
		 p < tso->stack+tso->stack_size;) {
		*q++ = *p++;
	    }

	    return (StgClosure *)new_tso;
	}
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
	//StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	to = copy(q,BLACKHOLE_sizeW(),stp);
	//ToDo: derive size etc from reverted IP
	//to = copy(q,size,stp);
	IF_DEBUG(gc,
		 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		       q, info_type(q), to, info_type(to)));
	return to;
    }

    case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	IF_DEBUG(gc,
		 belch("@@ evacuate: %p (%s) to %p (%s)",
		       q, info_type(q), to, info_type(to)));
	return to;

    case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	IF_DEBUG(gc,
		 belch("@@ evacuate: %p (%s) to %p (%s)",
		       q, info_type(q), to, info_type(to)));
	return to;

    case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	IF_DEBUG(gc,
		 belch("@@ evacuate: %p (%s) to %p (%s)",
		       q, info_type(q), to, info_type(to)));
	return to;
#endif
1991 barf("evacuate: strange closure type %d", (int)(info->type));
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.  Note
   that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
   reducing it to HNF, just that we have eliminated the selection.
   The result might be another thunk, or even another THUNK_SELECTOR.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.
   -------------------------------------------------------------------------- */
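
/* Example (illustrative only): given Haskell code like
 *
 *     let x = fst p
 *
 * the compiler builds a THUNK_SELECTOR with selector_offset 0 whose
 * selectee is the closure for p.  If p has already been evaluated to a
 * pair constructor by the time we GC, eval_thunk_selector() can return
 * the first payload pointer directly, short-circuiting the thunk and
 * avoiding the classic selector space leak.
 */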
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;

    selectee = p->selectee;

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;

    // If the THUNK_SELECTOR is in a generation that we are not
    // collecting, then bail out early.  We won't be able to save any
    // space in any case, and updating with an indirection is trickier
    // in an old gen.
    if (Bdescr((StgPtr)p)->gen_no > N) {
	return NULL;
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);
selector_loop:

    // We don't want to end up in to-space, because this causes
    // problems when the GC later tries to evacuate the result of
    // eval_thunk_selector().  There are various ways this could
    // happen:
    //
    // - following an IND_STATIC
    //
    // - when the old generation is compacted, the mark phase updates
    //   from-space pointers to be to-space pointers, and we can't
    //   reliably tell which we're following (eg. from an IND_STATIC).
    //
    // So we use the block-descriptor test to find out if we're in
    // to-space.
    //
    if (HEAP_ALLOCED(selectee) &&
	Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) {
	goto bale_out;
    }

    info = get_itbl(selectee);
    switch (info->type) {

    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
	// check that the size is in range
	ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
				    info->layout.payload.nptrs));

	// ToDo: shouldn't we test whether this pointer is in
	// to-space?
	return selectee->payload[field];

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;
    case EVACUATED:
	// We don't follow pointers into to-space; the constructor
	// has already been evacuated, so we won't save any space
	// leaks by evaluating this selector thunk anyhow.
	goto bale_out;

    case THUNK_SELECTOR:
    {
	StgClosure *val;
	// check that we don't recurse too much, re-using the
	// depth bound also used in evacuate().
	thunk_selector_depth++;
	if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
	    thunk_selector_depth--;
	    goto bale_out;
	}

	val = eval_thunk_selector(info->layout.selector_offset,
				  (StgSelector *)selectee);

	thunk_selector_depth--;

	if (val == NULL) {
	    goto bale_out;
	} else {
	    // We evaluated this selector thunk, so update it with
	    // an indirection.  NOTE: we don't use UPD_IND here,
	    // because we are guaranteed that p is in a generation
	    // that we are collecting, and we never want to put the
	    // indirection on a mutable list.
#ifdef PROFILING
	    // For the purposes of LDV profiling, we have destroyed
	    // the original selector thunk.
	    SET_INFO(p, info_ptr);
	    LDV_recordDead_FILL_SLOP_DYNAMIC(selectee);
#endif
	    ((StgInd *)selectee)->indirectee = val;
	    SET_INFO(selectee,&stg_IND_info);
#ifdef PROFILING
	    // For the purposes of LDV profiling, we have created an
	    // indirection.
	    LDV_recordCreate(selectee);
#endif
	    selectee = val;
	    goto selector_loop;
	}
    }
    case AP:
    case AP_STACK:
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_STATIC:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    case BLACKHOLE_BQ:
	// not evaluated yet
	goto bale_out;

    default:
	barf("eval_thunk_selector: strange selectee %d",
	     (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info pointer
    SET_INFO(p, info_ptr);
    return NULL;
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
	if ((bitmap & 1) != 0) {
	    evacuate(*p);
	}
	i++;
	p++;
	if (i % BITS_IN(W_) == 0) {
	    b++;
	    bitmap = large_srt->l.bitmap[b];
	} else {
	    bitmap = bitmap >> 1;
	}
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
static void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
	scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
	return;
    }

    while (bitmap != 0) {
	if ((bitmap & 1) != 0) {
#ifdef ENABLE_WIN32_DLL_SUPPORT
	    // Special-case to handle references to closures hiding out in DLLs, since
	    // double indirections required to get at those. The code generator knows
	    // which is which when generating the SRT, so it stores the (indirect)
	    // reference to the DLL closure in the table by first adding one to it.
	    // We check for this here, and undo the addition before evacuating it.
	    //
	    // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
	    // closure that's fixed at link-time, and no extra magic is required.
	    if ( (unsigned long)(*srt) & 0x1 ) {
		evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
	    } else {
		evacuate(*p);
	    }
#else
	    evacuate(*p);
#endif
	}
	p++;
	bitmap = bitmap >> 1;
    }
}
static inline void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap);
}

static inline void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap);
}

static inline void
scavenge_ret_srt(const StgInfoTable *info)
{
    StgRetInfoTable *ret_info;

    ret_info = itbl_to_ret_itbl(info);
    scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
    // chase the link field for any TSOs on the same queue
    (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
    if (   tso->why_blocked == BlockedOnMVar
	|| tso->why_blocked == BlockedOnBlackHole
	|| tso->why_blocked == BlockedOnException
#if defined(PAR)
	|| tso->why_blocked == BlockedOnGA
	|| tso->why_blocked == BlockedOnGA_NoSend
#endif
	) {
	tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    if ( tso->blocked_exceptions != NULL ) {
	tso->blocked_exceptions =
	    (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
static inline StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->bitmap);
	size = BITMAP_SIZE(fun_info->bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	size = ((StgLargeBitmap *)fun_info->bitmap)->size;
	scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
	size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
static inline StgPtr
scavenge_PAP (StgPAP *pap)
{
    StgPtr p;
    StgWord bitmap, size;
    StgFunInfoTable *fun_info;

    pap->fun = evacuate(pap->fun);
    fun_info = get_fun_itbl(pap->fun);
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)pap->payload;
    size = pap->n_args;

    switch (fun_info->fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
	p += size;
	break;
    case ARG_BCO:
	scavenge_large_bitmap((StgPtr)pap->payload, BCO_BITMAP(pap->fun), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
2394 /* -----------------------------------------------------------------------------
2395 Scavenge a given step until there are no more objects in this step
2398 evac_gen is set by the caller to be either zero (for a step in a
2399 generation < N) or G where G is the generation of the step being
2402 We sometimes temporarily change evac_gen back to zero if we're
2403 scavenging a mutable object where early promotion isn't such a good idea.
2405 -------------------------------------------------------------------------- */
2413 nat saved_evac_gen = evac_gen;
2418 failed_to_evac = rtsFalse;
2420 /* scavenge phase - standard breadth-first scavenging of the evacuated objects. */
2424 while (bd != stp->hp_bd || p < stp->hp) {
2426 // If we're at the end of this block, move on to the next block
2427 if (bd != stp->hp_bd && p == bd->free) {
2433 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2434 info = get_itbl((StgClosure *)p);
2436 ASSERT(thunk_selector_depth == 0);
2439 switch (info->type) {
2442 /* treat MVars specially, because we don't want to evacuate the
2443 * mut_link field in the middle of the closure.
2446 StgMVar *mvar = ((StgMVar *)p);
2448 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2449 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2450 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2451 evac_gen = saved_evac_gen;
2452 recordMutable((StgMutClosure *)mvar);
2453 failed_to_evac = rtsFalse; // mutable.
2454 p += sizeofW(StgMVar);
2459 scavenge_fun_srt(info);
2460 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2461 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2462 p += sizeofW(StgHeader) + 2;
2466 scavenge_thunk_srt(info);
2468 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2469 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2470 p += sizeofW(StgHeader) + 2;
2474 scavenge_thunk_srt(info);
2475 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2476 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2480 scavenge_fun_srt(info);
2482 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2483 p += sizeofW(StgHeader) + 1;
2487 scavenge_thunk_srt(info);
2488 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2492 scavenge_fun_srt(info);
2494 p += sizeofW(StgHeader) + 1;
2498 scavenge_thunk_srt(info);
2499 p += sizeofW(StgHeader) + 2;
2503 scavenge_fun_srt(info);
2505 p += sizeofW(StgHeader) + 2;
2509 scavenge_thunk_srt(info);
2510 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2511 p += sizeofW(StgHeader) + 2;
2515 scavenge_fun_srt(info);
2517 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2518 p += sizeofW(StgHeader) + 2;
2522 scavenge_fun_srt(info);
2526 scavenge_thunk_srt(info);
2537 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2538 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2539 (StgClosure *)*p = evacuate((StgClosure *)*p);
2541 p += info->layout.payload.nptrs;
2546 StgBCO *bco = (StgBCO *)p;
2547 (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
2548 (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
2549 (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
2550 (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
2551 p += bco_sizeW(bco);
2556 if (stp->gen->no != 0) {
2559 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2560 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2561 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2564 // ToDo: maybe use SET_HDR() and remove LDV_recordCreate()?
2566 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2569 // We pretend that p has just been created.
2570 LDV_recordCreate((StgClosure *)p);
2574 case IND_OLDGEN_PERM:
2575 ((StgIndOldGen *)p)->indirectee =
2576 evacuate(((StgIndOldGen *)p)->indirectee);
2577 if (failed_to_evac) {
2578 failed_to_evac = rtsFalse;
2579 recordOldToNewPtrs((StgMutClosure *)p);
2581 p += sizeofW(StgIndOldGen);
2586 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2587 evac_gen = saved_evac_gen;
2588 recordMutable((StgMutClosure *)p);
2589 failed_to_evac = rtsFalse; // mutable anyhow
2590 p += sizeofW(StgMutVar);
2595 failed_to_evac = rtsFalse; // mutable anyhow
2596 p += sizeofW(StgMutVar);
2600 case SE_CAF_BLACKHOLE:
2603 p += BLACKHOLE_sizeW();
2608 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2609 (StgClosure *)bh->blocking_queue =
2610 evacuate((StgClosure *)bh->blocking_queue);
2611 recordMutable((StgMutClosure *)bh);
2612 failed_to_evac = rtsFalse;
2613 p += BLACKHOLE_sizeW();
2617 case THUNK_SELECTOR:
2619 StgSelector *s = (StgSelector *)p;
2620 s->selectee = evacuate(s->selectee);
2621 p += THUNK_SELECTOR_sizeW();
2625 // A chunk of stack saved in a heap object
2628 StgAP_STACK *ap = (StgAP_STACK *)p;
2630 ap->fun = evacuate(ap->fun);
2631 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2632 p = (StgPtr)ap->payload + ap->size;
2638 p = scavenge_PAP((StgPAP *)p);
2642 // nothing to follow
2643 p += arr_words_sizeW((StgArrWords *)p);
2647 // follow everything
2651 evac_gen = 0; // repeatedly mutable
2652 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2653 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2654 (StgClosure *)*p = evacuate((StgClosure *)*p);
2656 evac_gen = saved_evac_gen;
2657 recordMutable((StgMutClosure *)q);
2658 failed_to_evac = rtsFalse; // mutable anyhow.
2662 case MUT_ARR_PTRS_FROZEN:
2663 // follow everything
2667 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2668 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2669 (StgClosure *)*p = evacuate((StgClosure *)*p);
2671 // it's tempting to recordMutable() if failed_to_evac is
2672 // false, but that breaks some assumptions (eg. every
2673 // closure on the mutable list is supposed to have the MUT
2674 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2680 StgTSO *tso = (StgTSO *)p;
2683 evac_gen = saved_evac_gen;
2684 recordMutable((StgMutClosure *)tso);
2685 failed_to_evac = rtsFalse; // mutable anyhow.
2686 p += tso_sizeW(tso);
2691 case RBH: // cf. BLACKHOLE_BQ
2694 nat size, ptrs, nonptrs, vhs;
2696 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2698 StgRBH *rbh = (StgRBH *)p;
2699 (StgClosure *)rbh->blocking_queue =
2700 evacuate((StgClosure *)rbh->blocking_queue);
2701 recordMutable((StgMutClosure *)rbh);
2702 failed_to_evac = rtsFalse; // mutable anyhow.
2704 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2705 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2706 // ToDo: use size of reverted closure here!
2707 p += BLACKHOLE_sizeW();
2713 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2714 // follow the pointer to the node which is being demanded
2715 (StgClosure *)bf->node =
2716 evacuate((StgClosure *)bf->node);
2717 // follow the link to the rest of the blocking queue
2718 (StgClosure *)bf->link =
2719 evacuate((StgClosure *)bf->link);
2720 if (failed_to_evac) {
2721 failed_to_evac = rtsFalse;
2722 recordMutable((StgMutClosure *)bf);
2725 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2726 bf, info_type((StgClosure *)bf),
2727 bf->node, info_type(bf->node)));
2728 p += sizeofW(StgBlockedFetch);
2736 p += sizeofW(StgFetchMe);
2737 break; // nothing to do in this case
2739 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2741 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2742 (StgClosure *)fmbq->blocking_queue =
2743 evacuate((StgClosure *)fmbq->blocking_queue);
2744 if (failed_to_evac) {
2745 failed_to_evac = rtsFalse;
2746 recordMutable((StgMutClosure *)fmbq);
2749 belch("@@ scavenge: %p (%s) exciting, isn't it",
2750 p, info_type((StgClosure *)p)));
2751 p += sizeofW(StgFetchMeBlockingQueue);
2757 barf("scavenge: unimplemented/strange closure type %d @ %p",
2761 /* If we didn't manage to promote all the objects pointed to by
2762 * the current object, then we have to designate this object as
2763 * mutable (because it contains old-to-new generation pointers).
2765 if (failed_to_evac) {
2766 failed_to_evac = rtsFalse;
2767 mkMutCons((StgClosure *)q, &generations[evac_gen]);
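/* Illustrative sketch (not part of the RTS): the overall shape of the
 * breadth-first (Cheney-style) scan that scavenge() performs.  A scan
 * pointer chases the allocation pointer; scavenging an object may
 * evacuate more objects, advancing the allocation pointer, and the
 * loop ends when the two meet.  All names here are hypothetical.
 */
#if 0
#include <stdint.h>

typedef uintptr_t Word;

extern Word *alloc_ptr;                  // where evacuate() copies to
extern Word *scan_object (Word *obj);    // evacuate fields, return next obj

static void
cheney_scan (Word *scan)
{
    // everything below 'scan' has had its fields followed;
    // everything between 'scan' and 'alloc_ptr' is still pending.
    while (scan < alloc_ptr) {
        scan = scan_object(scan);
    }
}
#endif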
2775 /* -----------------------------------------------------------------------------
2776 Scavenge everything on the mark stack.
2778 This is slightly different from scavenge():
2779 - we don't walk linearly through the objects, so the scavenger
2780 doesn't need to advance the pointer on to the next object.
2781 -------------------------------------------------------------------------- */
2784 scavenge_mark_stack(void)
2790 evac_gen = oldest_gen->no;
2791 saved_evac_gen = evac_gen;
2794 while (!mark_stack_empty()) {
2795 p = pop_mark_stack();
2797 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2798 info = get_itbl((StgClosure *)p);
2801 switch (info->type) {
2804 /* treat MVars specially, because we don't want to evacuate the
2805 * mut_link field in the middle of the closure.
2808 StgMVar *mvar = ((StgMVar *)p);
2810 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2811 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2812 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2813 evac_gen = saved_evac_gen;
2814 failed_to_evac = rtsFalse; // mutable.
2819 scavenge_fun_srt(info);
2820 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2821 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2825 scavenge_thunk_srt(info);
2827 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2828 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2833 scavenge_fun_srt(info);
2834 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2839 scavenge_thunk_srt(info);
2842 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2847 scavenge_fun_srt(info);
2852 scavenge_thunk_srt(info);
2860 scavenge_fun_srt(info);
2864 scavenge_thunk_srt(info);
2875 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2876 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2877 (StgClosure *)*p = evacuate((StgClosure *)*p);
2883 StgBCO *bco = (StgBCO *)p;
2884 (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
2885 (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
2886 (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
2887 (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
2892 // don't need to do anything here: the only possible case
2893 // is that we're in a 1-space compacting collector, with
2894 // no "old" generation.
2898 case IND_OLDGEN_PERM:
2899 ((StgIndOldGen *)p)->indirectee =
2900 evacuate(((StgIndOldGen *)p)->indirectee);
2901 if (failed_to_evac) {
2902 recordOldToNewPtrs((StgMutClosure *)p);
2904 failed_to_evac = rtsFalse;
2909 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2910 evac_gen = saved_evac_gen;
2911 failed_to_evac = rtsFalse;
2916 failed_to_evac = rtsFalse;
2920 case SE_CAF_BLACKHOLE:
2928 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2929 (StgClosure *)bh->blocking_queue =
2930 evacuate((StgClosure *)bh->blocking_queue);
2931 failed_to_evac = rtsFalse;
2935 case THUNK_SELECTOR:
2937 StgSelector *s = (StgSelector *)p;
2938 s->selectee = evacuate(s->selectee);
2942 // A chunk of stack saved in a heap object
2945 StgAP_STACK *ap = (StgAP_STACK *)p;
2947 ap->fun = evacuate(ap->fun);
2948 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2954 scavenge_PAP((StgPAP *)p);
2958 // follow everything
2962 evac_gen = 0; // repeatedly mutable
2963 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2964 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2965 (StgClosure *)*p = evacuate((StgClosure *)*p);
2967 evac_gen = saved_evac_gen;
2968 failed_to_evac = rtsFalse; // mutable anyhow.
2972 case MUT_ARR_PTRS_FROZEN:
2973 // follow everything
2977 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2978 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2979 (StgClosure *)*p = evacuate((StgClosure *)*p);
2986 StgTSO *tso = (StgTSO *)p;
2989 evac_gen = saved_evac_gen;
2990 failed_to_evac = rtsFalse;
2995 case RBH: // cf. BLACKHOLE_BQ
2998 nat size, ptrs, nonptrs, vhs;
3000 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3002 StgRBH *rbh = (StgRBH *)p;
3003 (StgClosure *)rbh->blocking_queue =
3004 evacuate((StgClosure *)rbh->blocking_queue);
3005 recordMutable((StgMutClosure *)rbh);
3006 failed_to_evac = rtsFalse; // mutable anyhow.
3008 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3009 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3015 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3016 // follow the pointer to the node which is being demanded
3017 (StgClosure *)bf->node =
3018 evacuate((StgClosure *)bf->node);
3019 // follow the link to the rest of the blocking queue
3020 (StgClosure *)bf->link =
3021 evacuate((StgClosure *)bf->link);
3022 if (failed_to_evac) {
3023 failed_to_evac = rtsFalse;
3024 recordMutable((StgMutClosure *)bf);
3027 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
3028 bf, info_type((StgClosure *)bf),
3029 bf->node, info_type(bf->node)));
3037 break; // nothing to do in this case
3039 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3041 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3042 (StgClosure *)fmbq->blocking_queue =
3043 evacuate((StgClosure *)fmbq->blocking_queue);
3044 if (failed_to_evac) {
3045 failed_to_evac = rtsFalse;
3046 recordMutable((StgMutClosure *)fmbq);
3049 belch("@@ scavenge: %p (%s) exciting, isn't it",
3050 p, info_type((StgClosure *)p)));
3056 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3060 if (failed_to_evac) {
3061 failed_to_evac = rtsFalse;
3062 mkMutCons((StgClosure *)q, &generations[evac_gen]);
3065 // mark the next bit to indicate "scavenged"
3066 mark(q+1, Bdescr(q));
3068 } // while (!mark_stack_empty())
3070 // start a new linear scan if the mark stack overflowed at some point
3071 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3072 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
3073 mark_stack_overflowed = rtsFalse;
3074 oldgen_scan_bd = oldest_gen->steps[0].blocks;
3075 oldgen_scan = oldgen_scan_bd->start;
3078 if (oldgen_scan_bd) {
3079 // push a new thing on the mark stack
3081 // find a closure that is marked but not scavenged, and start scavenging it
3083 while (oldgen_scan < oldgen_scan_bd->free
3084 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3088 if (oldgen_scan < oldgen_scan_bd->free) {
3090 // already scavenged?
3091 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3092 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3095 push_mark_stack(oldgen_scan);
3096 // ToDo: bump the linear scan by the actual size of the object
3097 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3101 oldgen_scan_bd = oldgen_scan_bd->link;
3102 if (oldgen_scan_bd != NULL) {
3103 oldgen_scan = oldgen_scan_bd->start;
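/* Illustrative sketch (not part of the RTS): the overflow strategy
 * used above.  When the mark stack fills up we simply drop pointers
 * and set a flag; the linear scan over the marked blocks later
 * re-finds anything that is marked but not yet scavenged.  All names
 * here are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

#define MARK_STACK_CAP 1024

static void  *mark_stk[MARK_STACK_CAP];
static size_t mark_sp;
static bool   overflowed;

static void
push_mark (void *p)
{
    if (mark_sp == MARK_STACK_CAP) {
        overflowed = true;    // drop p: the linear scan will recover it
    } else {
        mark_stk[mark_sp++] = p;
    }
}
#endif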
3109 /* -----------------------------------------------------------------------------
3110 Scavenge one object.
3112 This is used for objects that are temporarily marked as mutable
3113 because they contain old-to-new generation pointers. Only certain
3114 objects can have this property.
3115 -------------------------------------------------------------------------- */
3118 scavenge_one(StgPtr p)
3120 const StgInfoTable *info;
3121 nat saved_evac_gen = evac_gen;
3124 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3125 info = get_itbl((StgClosure *)p);
3127 switch (info->type) {
3130 case FUN_1_0: // hardly worth specialising these guys
3150 case IND_OLDGEN_PERM:
3154 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3155 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3156 (StgClosure *)*q = evacuate((StgClosure *)*q);
3162 case SE_CAF_BLACKHOLE:
3167 case THUNK_SELECTOR:
3169 StgSelector *s = (StgSelector *)p;
3170 s->selectee = evacuate(s->selectee);
3175 // nothing to follow
3180 // follow everything
3183 evac_gen = 0; // repeatedly mutable
3184 recordMutable((StgMutClosure *)p);
3185 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3186 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3187 (StgClosure *)*p = evacuate((StgClosure *)*p);
3189 evac_gen = saved_evac_gen;
3190 failed_to_evac = rtsFalse;
3194 case MUT_ARR_PTRS_FROZEN:
3196 // follow everything
3199 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3200 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3201 (StgClosure *)*p = evacuate((StgClosure *)*p);
3208 StgTSO *tso = (StgTSO *)p;
3210 evac_gen = 0; // repeatedly mutable
3212 recordMutable((StgMutClosure *)tso);
3213 evac_gen = saved_evac_gen;
3214 failed_to_evac = rtsFalse;
3220 StgAP_STACK *ap = (StgAP_STACK *)p;
3222 ap->fun = evacuate(ap->fun);
3223 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3224 p = (StgPtr)ap->payload + ap->size;
3230 p = scavenge_PAP((StgPAP *)p);
3234 // This might happen if, for instance, a MUT_CONS was pointing to a
3235 // THUNK which has since been updated. The IND_OLDGEN will
3236 // be on the mutable list anyway, so we don't need to do anything
3241 barf("scavenge_one: strange object %d", (int)(info->type));
3244 no_luck = failed_to_evac;
3245 failed_to_evac = rtsFalse;
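/* Illustrative sketch (not part of the RTS): the calling convention of
 * scavenge_one().  A true result means some pointee could not be
 * promoted far enough, so the caller must keep the object on a mutable
 * list in order to trace the old-to-new pointer at the next GC; this
 * mirrors the call in scavenge_large() below.
 */
#if 0
    if (scavenge_one(p)) {
        // p still holds an old-to-new pointer: remember it as a root
        // for subsequent collections of this generation.
        mkMutCons((StgClosure *)p, stp->gen);
    }
#endif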
3249 /* -----------------------------------------------------------------------------
3250 Scavenging mutable lists.
3252 We treat the mutable list of each generation > N (i.e. all the
3253 generations older than the one being collected) as roots. We also
3254 remove non-mutable objects from the mutable list at this point.
3255 -------------------------------------------------------------------------- */
3258 scavenge_mut_once_list(generation *gen)
3260 const StgInfoTable *info;
3261 StgMutClosure *p, *next, *new_list;
3263 p = gen->mut_once_list;
3264 new_list = END_MUT_LIST;
3268 failed_to_evac = rtsFalse;
3270 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3272 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3275 if (info->type==RBH)
3276 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3278 switch(info->type) {
3281 case IND_OLDGEN_PERM:
3283 /* Try to pull the indirectee into this generation, so we can
3284 * remove the indirection from the mutable list.
3286 ((StgIndOldGen *)p)->indirectee =
3287 evacuate(((StgIndOldGen *)p)->indirectee);
3289 #if 0 && defined(DEBUG)
3290 if (RtsFlags.DebugFlags.gc)
3291 /* Debugging code to print out the size of the thing we just evacuated */
3295 StgPtr start = gen->steps[0].scan;
3296 bdescr *start_bd = gen->steps[0].scan_bd;
3298 scavenge(&gen->steps[0]);
3299 if (start_bd != gen->steps[0].scan_bd) {
3300 size += (P_)BLOCK_ROUND_UP(start) - start;
3301 start_bd = start_bd->link;
3302 while (start_bd != gen->steps[0].scan_bd) {
3303 size += BLOCK_SIZE_W;
3304 start_bd = start_bd->link;
3306 size += gen->steps[0].scan -
3307 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3309 size = gen->steps[0].scan - start;
3311 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3315 /* failed_to_evac might happen if we've got more than two
3316 * generations, we're collecting only generation 0, the
3317 indirection resides in generation 2 and the indirectee is in generation 1. */
3320 if (failed_to_evac) {
3321 failed_to_evac = rtsFalse;
3322 p->mut_link = new_list;
3325 /* the mut_link field of an IND_STATIC is overloaded as the
3326 * static link field too (it just so happens that we don't need
3327 * both at the same time), so we need to NULL it out when
3328 * removing this object from the mutable list because the static
3329 link fields are all assumed to be NULL before doing a major collection. */
3337 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove it from
3338 * the mutable list if possible, by promoting whatever it points to. */
3341 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
3342 /* didn't manage to promote everything, so put the
3343 * MUT_CONS back on the list.
3345 p->mut_link = new_list;
3351 // shouldn't have anything else on the mutables list
3352 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
3356 gen->mut_once_list = new_list;
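/* Illustrative sketch (not part of the RTS): the destructive
 * list-filtering pattern used by scavenge_mut_once_list() above.  We
 * walk a singly linked list, consing the survivors onto a fresh list;
 * note that the link must be read before it is overwritten.  All names
 * here are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

typedef struct Node {
    struct Node *link;
} Node;

static Node *
filter_list (Node *list, bool (*keep)(Node *))
{
    Node *p, *next, *new_list = NULL;

    for (p = list; p != NULL; p = next) {
        next = p->link;          // save before overwriting the link
        if (keep(p)) {
            p->link = new_list;  // cons survivor onto the new list
            new_list = p;
        }
    }
    return new_list;             // survivors come back in reverse order
}
#endif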
3361 scavenge_mutable_list(generation *gen)
3363 const StgInfoTable *info;
3364 StgMutClosure *p, *next;
3366 p = gen->saved_mut_list;
3370 failed_to_evac = rtsFalse;
3372 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3374 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3377 if (info->type==RBH)
3378 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3380 switch(info->type) {
3383 // follow everything
3384 p->mut_link = gen->mut_list;
3389 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3390 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3391 (StgClosure *)*q = evacuate((StgClosure *)*q);
3396 // Happens if a MUT_ARR_PTRS in the old generation is frozen
3397 case MUT_ARR_PTRS_FROZEN:
3402 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3403 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3404 (StgClosure *)*q = evacuate((StgClosure *)*q);
3408 if (failed_to_evac) {
3409 failed_to_evac = rtsFalse;
3410 mkMutCons((StgClosure *)p, gen);
3416 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3417 p->mut_link = gen->mut_list;
3423 StgMVar *mvar = (StgMVar *)p;
3424 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
3425 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
3426 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
3427 p->mut_link = gen->mut_list;
3434 StgTSO *tso = (StgTSO *)p;
3438 /* Don't take this TSO off the mutable list - it might still
3439 * point to some younger objects (because we set evac_gen to 0 above). */
3442 tso->mut_link = gen->mut_list;
3443 gen->mut_list = (StgMutClosure *)tso;
3449 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3450 (StgClosure *)bh->blocking_queue =
3451 evacuate((StgClosure *)bh->blocking_queue);
3452 p->mut_link = gen->mut_list;
3457 /* Happens if a BLACKHOLE_BQ in the old generation is updated. */
3460 case IND_OLDGEN_PERM:
3461 /* Try to pull the indirectee into this generation, so we can
3462 * remove the indirection from the mutable list.
3465 ((StgIndOldGen *)p)->indirectee =
3466 evacuate(((StgIndOldGen *)p)->indirectee);
3469 if (failed_to_evac) {
3470 failed_to_evac = rtsFalse;
3471 p->mut_link = gen->mut_once_list;
3472 gen->mut_once_list = p;
3479 // HWL: check whether all of these are necessary
3481 case RBH: // cf. BLACKHOLE_BQ
3483 // nat size, ptrs, nonptrs, vhs;
3485 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3486 StgRBH *rbh = (StgRBH *)p;
3487 (StgClosure *)rbh->blocking_queue =
3488 evacuate((StgClosure *)rbh->blocking_queue);
3489 if (failed_to_evac) {
3490 failed_to_evac = rtsFalse;
3491 recordMutable((StgMutClosure *)rbh);
3493 // ToDo: use size of reverted closure here!
3494 p += BLACKHOLE_sizeW();
3500 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3501 // follow the pointer to the node which is being demanded
3502 (StgClosure *)bf->node =
3503 evacuate((StgClosure *)bf->node);
3504 // follow the link to the rest of the blocking queue
3505 (StgClosure *)bf->link =
3506 evacuate((StgClosure *)bf->link);
3507 if (failed_to_evac) {
3508 failed_to_evac = rtsFalse;
3509 recordMutable((StgMutClosure *)bf);
3511 p += sizeofW(StgBlockedFetch);
3517 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3520 p += sizeofW(StgFetchMe);
3521 break; // nothing to do in this case
3523 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3525 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3526 (StgClosure *)fmbq->blocking_queue =
3527 evacuate((StgClosure *)fmbq->blocking_queue);
3528 if (failed_to_evac) {
3529 failed_to_evac = rtsFalse;
3530 recordMutable((StgMutClosure *)fmbq);
3532 p += sizeofW(StgFetchMeBlockingQueue);
3538 // shouldn't have anything else on the mutables list
3539 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3546 scavenge_static(void)
3548 StgClosure* p = static_objects;
3549 const StgInfoTable *info;
3551 /* Always evacuate straight to the oldest generation for static objects. */
3553 evac_gen = oldest_gen->no;
3555 /* keep going until we've scavenged all the objects on the linked list. */
3557 while (p != END_OF_STATIC_LIST) {
3559 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3562 if (info->type==RBH)
3563 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3565 // make sure the info pointer is into text space
3567 /* Take this object *off* the static_objects list,
3568 * and put it on the scavenged_static_objects list.
3570 static_objects = STATIC_LINK(info,p);
3571 STATIC_LINK(info,p) = scavenged_static_objects;
3572 scavenged_static_objects = p;
3574 switch (info -> type) {
3578 StgInd *ind = (StgInd *)p;
3579 ind->indirectee = evacuate(ind->indirectee);
3581 /* might fail to evacuate it, in which case we have to pop it
3582 * back on the mutable list (and take it off the
3583 * scavenged_static list because the static link and mut link
3584 * pointers are one and the same).
3586 if (failed_to_evac) {
3587 failed_to_evac = rtsFalse;
3588 scavenged_static_objects = IND_STATIC_LINK(p);
3589 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3590 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3596 scavenge_thunk_srt(info);
3600 scavenge_fun_srt(info);
3607 next = (P_)p->payload + info->layout.payload.ptrs;
3608 // evacuate the pointers
3609 for (q = (P_)p->payload; q < next; q++) {
3610 (StgClosure *)*q = evacuate((StgClosure *)*q);
3616 barf("scavenge_static: strange closure %d", (int)(info->type));
3619 ASSERT(failed_to_evac == rtsFalse);
3621 /* get the next static object from the list. Remember, there might
3622 * be more stuff on this list now that we've done some evacuating!
3623 * (static_objects is a global)
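/* Illustrative sketch (not part of the RTS): the two-list discipline
 * of scavenge_static() above.  Each object is moved from the pending
 * list to the done list *before* it is scavenged, so objects that
 * evacuate() pushes onto the pending list in the meantime are still
 * picked up by the loop.  All names here are hypothetical.
 */
#if 0
#include <stddef.h>

typedef struct SObj {
    struct SObj *link;
} SObj;

static SObj *pending;                // cf. static_objects (may grow)
static SObj *done;                   // cf. scavenged_static_objects

extern void scavenge_sobj (SObj *p); // may push new objects on 'pending'

static void
drain_static_list (void)
{
    while (pending != NULL) {
        SObj *p = pending;
        pending = p->link;           // unlink first...
        p->link = done;              // ...then move to the done list...
        done = p;
        scavenge_sobj(p);            // ...and only then scavenge
    }
}
#endif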
3629 /* -----------------------------------------------------------------------------
3630 scavenge a chunk of memory described by a bitmap
3631 -------------------------------------------------------------------------- */
3634 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
3640 bitmap = large_bitmap->bitmap[b];
3641 for (i = 0; i < size; ) {
3642 if ((bitmap & 1) == 0) {
3643 (StgClosure *)*p = evacuate((StgClosure *)*p);
3647 if (i % BITS_IN(W_) == 0) {
3649 bitmap = large_bitmap->bitmap[b];
3651 bitmap = bitmap >> 1;
3656 static inline StgPtr
3657 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
3660 if ((bitmap & 1) == 0) {
3661 (StgClosure *)*p = evacuate((StgClosure *)*p);
3664 bitmap = bitmap >> 1;
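/* Illustrative sketch (not part of the RTS): walking a bitmap that
 * spans several words, as scavenge_large_bitmap() does above.  Within
 * a word we shift right one bit per slot; on a word boundary we fetch
 * the next bitmap word instead.  All names here are hypothetical.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t Word;
#define WORD_BITS (8 * sizeof(Word))

static void
walk_large_bitmap (Word *p, const Word *bitmap_words, size_t size,
                   void (*visit_ptr)(Word *slot))
{
    size_t i, b = 0;
    Word bitmap = bitmap_words[0];

    for (i = 0; i < size; i++) {
        if ((bitmap & 1) == 0) {
            visit_ptr(&p[i]);               // 0 bit: slot is a pointer
        }
        if ((i + 1) % WORD_BITS == 0 && i + 1 < size) {
            bitmap = bitmap_words[++b];     // cross a word boundary
        } else {
            bitmap >>= 1;
        }
    }
}
#endif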
3670 /* -----------------------------------------------------------------------------
3671 scavenge_stack walks over a section of stack and evacuates all the
3672 objects pointed to by it. We can use the same code for walking
3673 AP_STACK_UPDs, since these are just sections of copied stack.
3674 -------------------------------------------------------------------------- */
3678 scavenge_stack(StgPtr p, StgPtr stack_end)
3680 const StgRetInfoTable* info;
3684 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3687 * Each time around this loop, we are looking at a chunk of stack
3688 * that starts with an activation record.
3691 while (p < stack_end) {
3692 info = get_ret_itbl((StgClosure *)p);
3694 switch (info->i.type) {
3697 ((StgUpdateFrame *)p)->updatee
3698 = evacuate(((StgUpdateFrame *)p)->updatee);
3699 p += sizeofW(StgUpdateFrame);
3702 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3707 bitmap = BITMAP_BITS(info->i.layout.bitmap);
3708 size = BITMAP_SIZE(info->i.layout.bitmap);
3709 // NOTE: the payload starts immediately after the info-ptr; we
3710 // don't have an StgHeader in the same sense as a heap closure.
3712 p = scavenge_small_bitmap(p, size, bitmap);
3715 scavenge_srt((StgClosure **)info->srt, info->i.srt_bitmap);
3723 (StgClosure *)*p = evacuate((StgClosure *)*p);
3726 size = BCO_BITMAP_SIZE(bco);
3727 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
3732 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3738 size = info->i.layout.large_bitmap->size;
3740 scavenge_large_bitmap(p, info->i.layout.large_bitmap, size);
3742 // and don't forget to follow the SRT
3746 // Dynamic bitmap: the mask is stored on the stack, and
3747 // there are a number of non-pointers followed by a number
3748 // of pointers above the bitmapped area. (see StgMacros.h,
3753 dyn = ((StgRetDyn *)p)->liveness;
3755 // traverse the bitmap first
3756 bitmap = GET_LIVENESS(dyn);
3757 p = (P_)&((StgRetDyn *)p)->payload[0];
3758 size = RET_DYN_BITMAP_SIZE;
3759 p = scavenge_small_bitmap(p, size, bitmap);
3761 // skip over the non-ptr words
3762 p += GET_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
3764 // follow the ptr words
3765 for (size = GET_PTRS(dyn); size > 0; size--) {
3766 (StgClosure *)*p = evacuate((StgClosure *)*p);
3774 StgRetFun *ret_fun = (StgRetFun *)p;
3775 StgFunInfoTable *fun_info;
3777 ret_fun->fun = evacuate(ret_fun->fun);
3778 fun_info = get_fun_itbl(ret_fun->fun);
3779 p = scavenge_arg_block(fun_info, ret_fun->payload);
3784 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
3789 /*-----------------------------------------------------------------------------
3790 scavenge the large object list.
3792 evac_gen set by caller; similar games played with evac_gen as with
3793 scavenge() - see comment at the top of scavenge(). Most large
3794 objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
3796 --------------------------------------------------------------------------- */
3799 scavenge_large(step *stp)
3804 bd = stp->new_large_objects;
3806 for (; bd != NULL; bd = stp->new_large_objects) {
3808 /* take this object *off* the large objects list and put it on
3809 * the scavenged large objects list. This is so that we can
3810 * treat new_large_objects as a stack and push new objects on
3811 * the front when evacuating.
3813 stp->new_large_objects = bd->link;
3814 dbl_link_onto(bd, &stp->scavenged_large_objects);
3816 // update the block count in this step.
3817 stp->n_scavenged_large_blocks += bd->blocks;
3820 if (scavenge_one(p)) {
3821 mkMutCons((StgClosure *)p, stp->gen);
3826 /* -----------------------------------------------------------------------------
3827 Initialising the static object & mutable lists
3828 -------------------------------------------------------------------------- */
3831 zero_static_object_list(StgClosure* first_static)
3835 const StgInfoTable *info;
3837 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3839 link = STATIC_LINK(info, p);
3840 STATIC_LINK(info,p) = NULL;
3844 /* This function is only needed because we share the mutable link
3845 * field with the static link field in an IND_STATIC, so we have to
3846 * zero the mut_link field before doing a major GC, which needs the
3847 * static link field.
3849 * It doesn't do any harm to zero all the mutable link fields on the mutable list. */
3854 zero_mutable_list( StgMutClosure *first )
3856 StgMutClosure *next, *c;
3858 for (c = first; c != END_MUT_LIST; c = next) {
3864 /* -----------------------------------------------------------------------------
3866 -------------------------------------------------------------------------- */
3873 for (c = (StgIndStatic *)caf_list; c != NULL;
3874 c = (StgIndStatic *)c->static_link)
3876 c->header.info = c->saved_info;
3877 c->saved_info = NULL;
3878 // could, but not necessary: c->static_link = NULL;
3884 markCAFs( evac_fn evac )
3888 for (c = (StgIndStatic *)caf_list; c != NULL;
3889 c = (StgIndStatic *)c->static_link)
3891 evac(&c->indirectee);
3895 /* -----------------------------------------------------------------------------
3896 Sanity code for CAF garbage collection.
3898 With DEBUG turned on, we manage a CAF list in addition to the SRT
3899 mechanism. After GC, we run down the CAF list and blackhole any
3900 CAFs which have been garbage collected. This means we get an error
3901 whenever the program tries to enter a garbage collected CAF.
3903 Any garbage collected CAFs are taken off the CAF list at the same time.
3905 -------------------------------------------------------------------------- */
3907 #if 0 && defined(DEBUG)
3914 const StgInfoTable *info;
3925 ASSERT(info->type == IND_STATIC);
3927 if (STATIC_LINK(info,p) == NULL) {
3928 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3930 SET_INFO(p,&stg_BLACKHOLE_info);
3931 p = STATIC_LINK2(info,p);
3935 pp = &STATIC_LINK2(info,p);
3942 // belch("%d CAFs live", i);
3947 /* -----------------------------------------------------------------------------
3950 Whenever a thread returns to the scheduler after possibly doing
3951 some work, we have to run down the stack and black-hole all the
3952 closures referred to by update frames.
3953 -------------------------------------------------------------------------- */
3956 threadLazyBlackHole(StgTSO *tso)
3959 StgRetInfoTable *info;
3960 StgBlockingQueue *bh;
3963 stack_end = &tso->stack[tso->stack_size];
3965 frame = (StgClosure *)tso->sp;
3968 info = get_ret_itbl(frame);
3970 switch (info->i.type) {
3973 bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee;
3975 /* if the thunk is already blackholed, it means we've also
3976 * already blackholed the rest of the thunks on this stack,
3977 * so we can stop early.
3979 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3980 * don't interfere with this optimisation.
3982 if (bh->header.info == &stg_BLACKHOLE_info) {
3986 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3987 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3988 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3989 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3993 // We pretend that bh is now dead.
3994 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
3996 SET_INFO(bh,&stg_BLACKHOLE_info);
3999 // We pretend that bh has just been created.
4000 LDV_recordCreate(bh);
4004 frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
4010 // normal stack frames; do nothing except advance the pointer
4012 (StgPtr)frame += stack_frame_sizeW(frame);
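/* Illustrative sketch (not part of the RTS): the early-exit pattern in
 * threadLazyBlackHole() above.  Frames are visited from the top of the
 * stack downwards; the first update frame whose updatee is already
 * black-holed proves that everything below it was handled during an
 * earlier pause, so the walk can stop.  All names are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

typedef struct Frame {
    struct Frame *below;        // next frame, further down the stack
    bool          is_update;    // update frame?
    bool         *blackholed;   // &updatee's "is black-holed" state
} Frame;

static void
lazy_blackhole (Frame *f)
{
    for (; f != NULL; f = f->below) {
        if (!f->is_update)
            continue;            // ordinary frame: nothing to do
        if (*f->blackholed)
            return;              // the rest was done on a previous pause
        *f->blackholed = true;   // black-hole this updatee
    }
}
#endif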
4018 /* -----------------------------------------------------------------------------
4021 * Code largely pinched from old RTS, then hacked to bits. We also do
4022 * lazy black holing here.
4024 * -------------------------------------------------------------------------- */
4026 struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };
4029 threadSqueezeStack(StgTSO *tso)
4032 rtsBool prev_was_update_frame;
4033 StgClosure *updatee = NULL;
4035 StgRetInfoTable *info;
4036 StgWord current_gap_size;
4037 struct stack_gap *gap;
4040 // Traverse the stack upwards, replacing adjacent update frames
4041 // with a single update frame and a "stack gap". A stack gap
4042 // contains two values: the size of the gap, and the distance
4043 // to the next gap (or the stack top).
4045 bottom = &(tso->stack[tso->stack_size]);
4049 ASSERT(frame < bottom);
4051 prev_was_update_frame = rtsFalse;
4052 current_gap_size = 0;
4053 gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));
4055 while (frame < bottom) {
4057 info = get_ret_itbl((StgClosure *)frame);
4058 switch (info->i.type) {
4062 StgUpdateFrame *upd = (StgUpdateFrame *)frame;
4064 if (upd->updatee->header.info == &stg_BLACKHOLE_info) {
4066 // found a BLACKHOLE'd update frame; we've been here
4067 // before, in a previous GC, so just break out.
4069 // Mark the end of the gap, if we're in one.
4070 if (current_gap_size != 0) {
4071 gap = (struct stack_gap *)(frame-sizeofW(StgUpdateFrame));
4074 frame += sizeofW(StgUpdateFrame);
4075 goto done_traversing;
4078 if (prev_was_update_frame) {
4080 TICK_UPD_SQUEEZED();
4081 /* wasn't there something about update squeezing and ticky to be
4082 * sorted out? oh yes: we aren't counting each enter properly
4083 * in this case. See the log somewhere. KSW 1999-04-21
4085 * Check two things: that the two update frames don't point to
4086 * the same object, and that the updatee_bypass isn't already an
4087 * indirection. Both of these cases only happen when we're in a
4088 * black hole-style loop (and there are multiple update frames
4089 * on the stack pointing to the same closure), but they can both
4090 * screw us up if we don't check.
4092 if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
4093 // this wakes the threads up
4094 UPD_IND_NOLOCK(upd->updatee, updatee);
4097 // now mark this update frame as a stack gap. The gap
4098 // marker resides in the bottom-most update frame of
4099 // the series of adjacent frames, and covers all the
4100 // frames in this series.
4101 current_gap_size += sizeofW(StgUpdateFrame);
4102 ((struct stack_gap *)frame)->gap_size = current_gap_size;
4103 ((struct stack_gap *)frame)->next_gap = gap;
4105 frame += sizeofW(StgUpdateFrame);
4109 // single update frame, or the topmost update frame in a series
4111 StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee;
4113 // Do lazy black-holing
4114 if (bh->header.info != &stg_BLACKHOLE_info &&
4115 bh->header.info != &stg_BLACKHOLE_BQ_info &&
4116 bh->header.info != &stg_CAF_BLACKHOLE_info) {
4117 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4118 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
4121 /* zero out the slop so that the sanity checker can tell
4122 * where the next closure is.
4125 StgInfoTable *bh_info = get_itbl(bh);
4126 nat np = bh_info->layout.payload.ptrs,
4127 nw = bh_info->layout.payload.nptrs, i;
4128 /* don't zero out slop for a THUNK_SELECTOR,
4129 * because its layout info is used for a
4130 * different purpose, and it's exactly the
4131 * same size as a BLACKHOLE in any case.
4133 if (bh_info->type != THUNK_SELECTOR) {
4134 for (i = np; i < np + nw; i++) {
4135 ((StgClosure *)bh)->payload[i] = 0;
4141 // We pretend that bh is now dead.
4142 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4144 // ToDo: maybe use SET_HDR() and remove LDV_recordCreate()?
4145 SET_INFO(bh,&stg_BLACKHOLE_info);
4147 // We pretend that bh has just been created.
4148 LDV_recordCreate(bh);
4152 prev_was_update_frame = rtsTrue;
4153 updatee = upd->updatee;
4154 frame += sizeofW(StgUpdateFrame);
4160 prev_was_update_frame = rtsFalse;
4162 // we're not in a gap... check whether this is the end of a gap
4163 // (an update frame can't be the end of a gap).
4164 if (current_gap_size != 0) {
4165 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4167 current_gap_size = 0;
4169 frame += stack_frame_sizeW((StgClosure *)frame);
4176 // Now we have a stack with gaps in it, and we have to walk down
4177 // shoving the stack up to fill in the gaps. A diagram might help:
4181 // | ********* | <- sp
4185 // | stack_gap | <- gap | chunk_size
4187 // | ......... | <- gap_end v
4193 // 'sp' points to the current top-of-stack
4194 // 'gap' points to the stack_gap structure inside the gap
4195 // ***** indicates real stack data
4196 // ..... indicates gap
4197 // <empty> indicates unused
4201 void *gap_start, *next_gap_start, *gap_end;
4204 next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
4205 sp = next_gap_start;
4207 while ((StgPtr)gap > tso->sp) {
4209 // we're working in *bytes* now...
4210 gap_start = next_gap_start;
4211 gap_end = gap_start - gap->gap_size * sizeof(W_);
4213 gap = gap->next_gap;
4214 next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
4216 chunk_size = gap_end - next_gap_start;
4218 sp -= chunk_size; memmove(sp, next_gap_start, chunk_size); // slide the chunk up under sp
4221 tso->sp = (StgPtr)sp;
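/* Illustrative sketch (not part of the RTS): compacting a downward-
 * growing stack containing gaps, as the loop above does.  Each chunk
 * of live data between two gaps is slid towards higher addresses with
 * memmove(); source and destination may overlap, which memmove()
 * (unlike memcpy()) permits.  All names here are hypothetical.
 */
#if 0
#include <string.h>

// Slide 'chunk_size' bytes of live data at 'src' up so that they end
// at 'dest_end', returning the new (lower) end of the compacted area.
static char *
slide_chunk_up (char *dest_end, const char *src, size_t chunk_size)
{
    char *dest = dest_end - chunk_size;
    memmove(dest, src, chunk_size);     // regions may overlap
    return dest;
}
#endif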
4225 /* -----------------------------------------------------------------------------
4228 * We have to prepare for GC - this means doing lazy black holing
4229 * here. We also take the opportunity to do stack squeezing if it's turned on.
4231 * -------------------------------------------------------------------------- */
4233 threadPaused(StgTSO *tso)
4235 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
4236 threadSqueezeStack(tso); // does black holing too
4238 threadLazyBlackHole(tso);
4241 /* -----------------------------------------------------------------------------
4243 * -------------------------------------------------------------------------- */
4247 printMutOnceList(generation *gen)
4249 StgMutClosure *p, *next;
4251 p = gen->mut_once_list;
4254 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
4255 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4256 fprintf(stderr, "%p (%s), ",
4257 p, info_type((StgClosure *)p));
4259 fputc('\n', stderr);
4263 printMutableList(generation *gen)
4265 StgMutClosure *p, *next;
4270 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
4271 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4272 fprintf(stderr, "%p (%s), ",
4273 p, info_type((StgClosure *)p));
4275 fputc('\n', stderr);
4278 static inline rtsBool
4279 maybeLarge(StgClosure *closure)
4281 StgInfoTable *info = get_itbl(closure);
4283 /* closure types that may be found on the new_large_objects list;
4284 see scavenge_large */
4285 return (info->type == MUT_ARR_PTRS ||
4286 info->type == MUT_ARR_PTRS_FROZEN ||
4287 info->type == TSO ||
4288 info->type == ARR_WORDS);