/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.162 2003/10/24 11:45:40 simonmar Exp $
 *
 * (c) The GHC Team 1998-2003
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

#include "RetainerProfile.h"
#include "LdvProfile.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
static StgClosure* static_objects;	// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
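/* The following disabled sketch (not part of the collector proper)
 * illustrates the consing described above: an object goes on the front
 * of static_objects via its static link field, and a zero link field
 * means "not on any list" - which is why END_OF_STATIC_LIST is a
 * non-zero sentinel rather than NULL.  The helper name is invented for
 * illustration; evacuate() below does this inline per closure type.
 */
#if 0
static void
push_static_object_sketch( StgClosure *q, const StgInfoTable *info )
{
    if (STATIC_LINK(info,q) == NULL) {	   // not yet on any list?
	STATIC_LINK(info,q) = static_objects;  // cons onto the front
	static_objects = q;
    }
}
#endif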
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

StgWeak *old_weak_ptr_list; // also pending finaliser list
/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
static lnat new_blocks;		 // blocks allocated during this GC
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 8
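/* To see why the depth bound matters, consider a chain of selector
 * thunks like the one built by the (hypothetical) Haskell expression
 *
 *	fst (fst (fst ... (fst pair) ...))
 *
 * Each layer is a THUNK_SELECTOR selecting from the result of the one
 * beneath it, so evaluating the outermost selector during evacuation
 * would recurse once per layer; the depth bound cuts this off and
 * falls back to copying the thunk unevaluated.
 */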
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static bdescr *     gc_alloc_block          ( step *stp );
static void         mark_root               ( StgClosure **root );

// Use a register argument for evacuate, if available.
#if __GNUC__ >= 2
static StgClosure * evacuate (StgClosure *q) __attribute__((regparm(1)));
#else
static StgClosure * evacuate (StgClosure *q);
#endif
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         scavenge_large_bitmap   ( StgPtr p,
					      StgLargeBitmap *large_bitmap,
					      nat size );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
/* -----------------------------------------------------------------------------
   Allocate a new to-space block in the given step.
   -------------------------------------------------------------------------- */
static bdescr *
gc_alloc_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step   = stp;
    bd->link   = NULL;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
	bd->flags = BF_EVACUATED;
    } else {
	bd->flags = 0;
    }

    // Start a new to-space block, chain it on after the previous one.
    if (stp->hp_bd == NULL) {
	stp->to_blocks = bd;
    } else {
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = bd;
    }

    stp->hp_bd = bd;
    stp->hp    = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;

    stp->n_to_blocks++;
    new_blocks++;

    return bd;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all generations (mutable_list and mut_once_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
	 highest step in this generation or the first step in the next
	 generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
	 When we evacuate an object we attempt to evacuate
	 everything it points to into the same generation - this is
	 achieved by setting evac_gen to the desired generation.  If
	 we can't do this, then an entry in the mut_once list has to
	 be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
	 it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: sched_mutex

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     CurrentTime[CurrentProc], CurrentTime[CurrentProc]));
#endif

#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;
  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (!major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
    g0s0->n_to_blocks = 0;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  // Initialise to-space in all the generations/steps that we're
  // collecting.
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);

      // start a new to-space for this step.
      stp->hp        = NULL;
      stp->hp_bd     = NULL;
      stp->to_blocks = NULL;

      // allocate the first to-space block; extra blocks will be
      // chained on as necessary.
      bd = gc_alloc_block(stp);
      stp->scan    = bd->start;
      stp->scan_bd = bd;

      // initialise the large object queues.
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags &= ~BF_EVACUATED;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgPtr bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
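  /* A worked instance of the bitmap sizing above (illustrative numbers,
   * not from the original source): one mark bit is needed per word, so
   * with 4-kbyte blocks and 4-byte words a block holds BLOCK_SIZE_W =
   * 1024 words and needs 1024 bits = BLOCK_SIZE / (sizeof(W_) *
   * BITS_PER_BYTE) = 4096 / 32 = 128 bytes of bitmap; a step of 100
   * blocks therefore allocates a 12800-byte bitmap, rounded up to whole
   * blocks by BLOCK_ROUND_UP.
   */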
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = gc_alloc_block(stp);
	  stp->blocks = bd;
	  stp->n_blocks = 1;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan    = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack        = (StgPtr *)mark_stack_bdescr->start;
      mark_sp           = mark_stack;
      mark_splim        = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */
#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_stage = WeakPtrs;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }
    /* When scavenging the older generations: Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      long gen;
      int st;

    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	  scavenge_mark_stack();
	  flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }

    if (flag) { goto loop; }
    // must be last...  invariant is that everything is fully
    // scavenged at this point.
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
	goto loop;
    }
  }
  /* Update the pointers from the "main thread" list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      StgMainThread *m;
      StgTSO *tso;
      for (m = main_threads; m != NULL; m = m->link) {
	  tso = (StgTSO *) isAlive((StgClosure *)m->tso);
	  if (tso == NULL) {
	      barf("main thread has been GC'd");
	  }
	  m->tso = tso;
      }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      ASSERT(Bdescr(stp->hp) == stp->hp_bd);
	      stp->hp_bd->free = stp->hp;
	  }
      }
  }
#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
    LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;  // now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;  // now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects  = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;
      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_to_blocks = 0;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;
      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);
      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
	  (RtsFlags.GcFlags.compact ||
	   (max > 0 &&
	    oldest_gen->steps[0].n_blocks >
	    (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

	  // this test is necessary to ensure that the calculations
	  // below don't have any negative results - we're working
	  // with unsigned values here.
	  if (max < min_alloc) {
	      heapOverflow();
	  }
	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }
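	  /* Where these bounds come from (a sketch, not from the original
	   * source): with plain copying, each of the gens-1 older
	   * generations may need up to 2*size blocks while it is being
	   * collected (from-space plus to-space), plus min_alloc for the
	   * allocation area - hence the size*(gens-1)*2 + min_alloc test.
	   * Compacting the oldest generation collects it in place, saving
	   * roughly one "size" worth of to-space, which is why the divisor
	   * weakens to (gens-1)*2 - 1 in the compacted case.  Each
	   * assignment just solves its "total > max" test for size.
	   */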
	  if (size < live) {
	      heapOverflow();
	  }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }

  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */
    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;
    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else { // Generational collector

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	  (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	  (100 + (long)g0s0_pcnt_kept);

      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	  blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
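      /* Plugging in example numbers (illustrative only): with a
       * heapSizeSuggestion of 1000 blocks, needed = 200 and
       * g0s0_pcnt_kept = 30, we get blocks = (1000 - 200) * 100 / 130
       * = 615.  Allocating 615 blocks, keeping ~30% of them live
       * (~185 blocks) and reserving 200 for the other steps comes to
       * ~1000 blocks, right on the suggestion.
       */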
    } else {
      // we might have added extra large blocks to the nursery, so
      // resize back to minAllocAreaSize again.
      resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
    }
  }

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif
#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  RELEASE_LOCK(&sched_mutex);
  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  ACQUIRE_LOCK(&sched_mutex);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif
  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  switch (weak_stage) {

  case WeakDone:
      return rtsFalse;

  case WeakPtrs:
      /* doesn't matter where we evacuate values/finalizers to, since
       * these pointers are treated as roots (iff the keys are alive).
       */
      evac_gen = 0;

      last_w = &old_weak_ptr_list;
      for (w = old_weak_ptr_list; w != NULL; w = next_w) {

	  /* There might be a DEAD_WEAK on the list if finalizeWeak# was
	   * called on a live weak pointer object.  Just remove it.
	   */
	  if (w->header.info == &stg_DEAD_WEAK_info) {
	      next_w = ((StgDeadWeak *)w)->link;
	      *last_w = next_w;
	      continue;
	  }

	  switch (get_itbl(w)->type) {

	  case EVACUATED:
	      next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
	      *last_w = next_w;
	      continue;

	  case WEAK:
	      /* Now, check whether the key is reachable.
	       */
	      new = isAlive(w->key);

	      if (new != NULL) {
		  w->key = new;
		  // evacuate the value and finalizer
		  w->value = evacuate(w->value);
		  w->finalizer = evacuate(w->finalizer);
		  // remove this weak ptr from the old_weak_ptr list
		  *last_w = w->link;
		  next_w = w->link;
		  // and put it on the new weak ptr list
		  w->link = weak_ptr_list;
		  weak_ptr_list = w;
		  flag = rtsTrue;

		  IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
				       w, w->key));
		  continue;
	      }
	      else {
		  last_w = &(w->link);
		  next_w = w->link;
		  continue;
	      }

	  default:
	      barf("traverse_weak_ptr_list: not WEAK");
	  }
      }
      /* If we didn't make any changes, then we can go round and kill all
       * the dead weak pointers.  The old_weak_ptr list is used as a list
       * of pending finalizers later on.
       */
      if (flag == rtsFalse) {
	  for (w = old_weak_ptr_list; w; w = w->link) {
	      w->finalizer = evacuate(w->finalizer);
	  }

	  // Next, move to the WeakThreads stage after fully
	  // scavenging the finalizers we've just evacuated.
	  weak_stage = WeakThreads;
      }

      return rtsTrue;

  case WeakThreads:
      /* Now deal with the all_threads list, which behaves somewhat like
       * the weak ptr list.  If we discover any threads that are about to
       * become garbage, we wake them up and administer an exception.
       */
      {
	  StgTSO *t, *tmp, *next, **prev;

	  prev = &old_all_threads;
	  for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

	      (StgClosure *)tmp = isAlive((StgClosure *)t);

	      if (tmp != NULL) {
		  t = tmp;
	      }
	      ASSERT(get_itbl(t)->type == TSO);
	      switch (t->what_next) {
	      case ThreadRelocated:
		  next = t->link;
		  *prev = next;
		  continue;
	      case ThreadKilled:
	      case ThreadComplete:
		  // finished or died.  The thread might still be alive, but we
		  // don't keep it on the all_threads list.  Don't forget to
		  // stub out its global_link field.
		  next = t->global_link;
		  t->global_link = END_TSO_QUEUE;
		  *prev = next;
		  continue;
	      default:
		  ;
	      }

	      if (tmp == NULL) {
		  // not alive (yet): leave this thread on the
		  // old_all_threads list.
		  prev = &(t->global_link);
		  next = t->global_link;
	      }
	      else {
		  // alive: move this thread onto the all_threads list.
		  next = t->global_link;
		  t->global_link = all_threads;
		  all_threads = t;
		  *prev = next;
	      }
	  }
      }
      /* And resurrect any threads which were about to become garbage.
       */
      {
	  StgTSO *t, *tmp, *next;
	  for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	      next = t->global_link;
	      (StgClosure *)tmp = evacuate((StgClosure *)t);
	      tmp->global_link = resurrected_threads;
	      resurrected_threads = tmp;
	  }
      }

      weak_stage = WeakDone;  // *now* we're done,
      return rtsTrue;         // but one more round of scavenging, please

  default:
      barf("traverse_weak_ptr_list");
      return rtsTrue;
  }
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
      ASSERT(w->header.info == &stg_DEAD_WEAK_info
	     || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

 loop:
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
  info = get_itbl(p);

  // ignore static closures
  //
  // ToDo: for static closures, check the static link field.
  // Problem here is that we sometimes don't set the link field, eg.
  // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
  //
  if (!HEAP_ALLOCED(p)) {
    return p;
  }

  // ignore closures in generations that we're not collecting.
  bd = Bdescr((P_)p);
  if (bd->gen_no > N) {
    return p;
  }

  // if it's a pointer into to-space, then we're done
  if (bd->flags & BF_EVACUATED) {
    return p;
  }

  // large objects use the evacuated flag
  if (bd->flags & BF_LARGE) {
    return NULL;
  }

  // check the mark bit for compacted steps
  if (bd->step->is_compacted && is_marked((P_)p,bd)) {
    return p;
  }
  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		// rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    return NULL;

  default:
    // dead.
    return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // Source object must be in from-space:
    ASSERT((Bdescr((P_)p)->flags & BF_EVACUATED) == 0);
    // not true: (ToDo: perhaps it should be)
    // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    p->header.info = &stg_EVACUATED_info;
    ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    gc_alloc_block(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    gc_alloc_block(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
  // words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	 (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
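/* A typical way this arises (an illustrative scenario, not from the
 * original source): an immutable object is promoted to an old
 * generation, but one of its fields still points into a younger
 * generation that evacuate() couldn't promote (failed_to_evac).  The
 * object itself will never be mutated, so instead of putting it on a
 * mutable list directly we allocate a MUT_CONS wrapper in the old
 * generation and record that on the mut_once list; the deferred
 * promotion is then retried at the next GC.
 */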
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    gc_alloc_block(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }
    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO &&
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif
  // make sure the info pointer is into text space
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  info = get_itbl(q);

  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      return copy(q,sizeW_fromITBL(info),stp);
  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),stp);

  case BCO:
      return copy(q,bco_sizeW((StgBCO *)q),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
	StgClosure *p;

	if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
	    return copy(q,THUNK_SELECTOR_sizeW(),stp);
	}

	p = eval_thunk_selector(info->layout.selector_offset,
				(StgSelector *)q);

	if (p == NULL) {
	    return copy(q,THUNK_SELECTOR_sizeW(),stp);
	} else {
	    // q is still BLACKHOLE'd.
	    thunk_selector_depth++;
	    p = evacuate(p);
	    thunk_selector_depth--;
#ifdef PROFILING
	    // We store the size of the just evacuated object in the
	    // LDV word so that the profiler can guess the position of
	    // the next object later.
	    SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
#endif
	    return p;
	}
    }

  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_bitmap != 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
	THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_bitmap != 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
	FUN_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;
  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
	STATIC_LINK(info,(StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
  case AP:
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case AP_STACK:
    return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso;
	StgPtr p, q;

	new_tso = (StgTSO *)copyPart((StgClosure *)tso,
				     tso_sizeW(tso),
				     sizeofW(StgTSO), stp);
	move_TSO(tso, new_tso);
	for (p = tso->sp, q = new_tso->sp;
	     p < tso->stack+tso->stack_size;) {
	  *q++ = *p++;
	}

	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

#ifdef DIST
  case REMOTE_REF:
#endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.  Note
   that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
   reducing it to HNF, just that we have eliminated the selection.
   The result might be another thunk, or even another THUNK_SELECTOR.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.
   -------------------------------------------------------------------------- */
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;

    selectee = p->selectee;

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;

    // If the THUNK_SELECTOR is in a generation that we are not
    // collecting, then bail out early.  We won't be able to save any
    // space in any case, and updating with an indirection is trickier
    // in an old generation.
    if (Bdescr((StgPtr)p)->gen_no > N) {
	return NULL;
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);

selector_loop:
    // We don't want to end up in to-space, because this causes
    // problems when the GC later tries to evacuate the result of
    // eval_thunk_selector().  There are various ways this could
    // happen:
    //
    // - following an IND_STATIC
    //
    // - when the old generation is compacted, the mark phase updates
    //   from-space pointers to be to-space pointers, and we can't
    //   reliably tell which we're following (eg. from an IND_STATIC).
    //
    // So we use the block-descriptor test to find out if we're in
    // to-space.
    //
    if (HEAP_ALLOCED(selectee) &&
	Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) {
	goto bale_out;
    }
    info = get_itbl(selectee);
    switch (info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
	  // check that the size is in range
	  ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
				      info->layout.payload.nptrs));

	  // ToDo: shouldn't we test whether this pointer is in
	  // to-space?
	  return selectee->payload[field];

      case IND:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
      case IND_STATIC:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;

      case EVACUATED:
	  // We don't follow pointers into to-space; the constructor
	  // has already been evacuated, so we won't save any space
	  // leaks by evaluating this selector thunk anyhow.
	  goto bale_out;

      case THUNK_SELECTOR:
      {
	  StgClosure *val;
	  // check that we don't recurse too much, re-using the
	  // depth bound also used in evacuate().
	  thunk_selector_depth++;
	  if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
	      goto bale_out;
	  }

	  val = eval_thunk_selector(info->layout.selector_offset,
				    (StgSelector *)selectee);

	  thunk_selector_depth--;

	  if (val == NULL) {
	      goto bale_out;
	  } else {
	      // We evaluated this selector thunk, so update it with
	      // an indirection.  NOTE: we don't use UPD_IND here,
	      // because we are guaranteed that p is in a generation
	      // that we are collecting, and we never want to put the
	      // indirection on a mutable list.
#ifdef PROFILING
	      // For the purposes of LDV profiling, we have destroyed
	      // the original selector thunk.
	      SET_INFO(p, info_ptr);
	      LDV_recordDead_FILL_SLOP_DYNAMIC(selectee);
#endif
	      ((StgInd *)selectee)->indirectee = val;
	      SET_INFO(selectee,&stg_IND_info);
#ifdef PROFILING
	      // For the purposes of LDV profiling, we have created an
	      // indirection.
	      LDV_recordCreate(selectee);
#endif
	      return val;
	  }
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
#if defined(PAR)
      case RBH:
      case BLOCKED_FETCH:
#ifdef DIST
      case REMOTE_REF:
#endif
      case FETCH_ME:
      case FETCH_ME_BQ:
#endif
	  // not evaluated yet
	  goto bale_out;

      default:
	  barf("eval_thunk_selector: strange selectee %d",
	       (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info pointer
    SET_INFO(p, info_ptr);
    return NULL;
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
	if ((bitmap & 1) != 0) {
	    evacuate(*p);
	}
	i++;
	p++;
	if (i % BITS_IN(W_) == 0) {
	    b++;
	    bitmap = large_srt->l.bitmap[b];
	} else {
	    bitmap = bitmap >> 1;
	}
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
static inline void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }
  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#ifdef ENABLE_WIN32_DLL_SUPPORT
	  // Special-case to handle references to closures hiding out in DLLs, since
	  // double indirections are required to get at those.  The code generator knows
	  // which is which when generating the SRT, so it stores the (indirect)
	  // reference to the DLL closure in the table by first adding one to it.
	  // We check for this here, and undo the addition before evacuating it.
	  //
	  // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
	  // closure that's fixed at link-time, and no extra magic is required.
	  if ( (unsigned long)(*srt) & 0x1 ) {
	      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
	  } else {
	      evacuate(*p);
	  }
#else
	  evacuate(*p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
static inline void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap);
}

static inline void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap);
}

static inline void
scavenge_ret_srt(const StgInfoTable *info)
{
    StgRetInfoTable *ret_info;

    ret_info = itbl_to_ret_itbl(info);
    scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
    // chase the link field for any TSOs on the same queue
    (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
    if (   tso->why_blocked == BlockedOnMVar
	|| tso->why_blocked == BlockedOnBlackHole
	|| tso->why_blocked == BlockedOnException
#if defined(PAR)
	|| tso->why_blocked == BlockedOnGA
	|| tso->why_blocked == BlockedOnGA_NoSend
#endif
	) {
	tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    if ( tso->blocked_exceptions != NULL ) {
	tso->blocked_exceptions =
	    (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
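/* Note on the bitmap convention used below (inferred from the code, not
 * spelled out in the original): a set bit marks a non-pointer word, so
 * the scavenging loops evacuate a word only when its bit is 0.  For
 * example, a three-word arg block with bitmap 0b010 holds pointers in
 * words 0 and 2 and a non-pointer (say, an unboxed Int#) in word 1;
 * bits are consumed least-significant first as the loop shifts the
 * bitmap right one place per word.
 */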
static inline StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->bitmap);
	size = BITMAP_SIZE(fun_info->bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	size = ((StgLargeBitmap *)fun_info->bitmap)->size;
	scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
	size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
static inline StgPtr
scavenge_PAP (StgPAP *pap)
{
    StgPtr p;
    StgWord bitmap, size;
    StgFunInfoTable *fun_info;

    pap->fun = evacuate(pap->fun);
    fun_info = get_fun_itbl(pap->fun);
    ASSERT(fun_info->i.type != PAP);

    p = (StgPtr)pap->payload;
    size = pap->n_args;

    switch (fun_info->fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size);
	p += size;
	break;
    case ARG_BCO:
	scavenge_large_bitmap((StgPtr)pap->payload, BCO_BITMAP(pap->fun), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
2396 /* -----------------------------------------------------------------------------
2397 Scavenge a given step until there are no more objects in this step
2400 evac_gen is set by the caller to be either zero (for a step in a
2401 generation < N) or G where G is the generation of the step being
2402 scavenged.
2404 We sometimes temporarily change evac_gen back to zero if we're
2405 scavenging a mutable object where early promotion isn't such a good
2406 idea.
2407 -------------------------------------------------------------------------- */
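/* Schematically, the loop below is a Cheney-style scan: the scan
 * pointer chases the allocation pointer through to-space, and
 * evacuating an object may allocate more to-space ahead of the scan,
 * so the loop runs until the two pointers meet.  A self-contained
 * sketch under a toy object layout (obj, cheney_scan and evac are
 * illustrative, not RTS definitions):
 */
#if 0
#include <stddef.h>

typedef struct obj {
    size_t n_ptrs;             /* number of pointer fields */
    struct obj *ptrs[1];       /* the pointer fields (toy layout) */
} obj;

static void cheney_scan(char **scan, char **alloc, obj *(*evac)(obj *))
{
    while (*scan != *alloc) {                  /* unscavenged objects remain */
        obj *o = (obj *)*scan;
        size_t i;
        for (i = 0; i < o->n_ptrs; i++)
            o->ptrs[i] = evac(o->ptrs[i]);     /* may advance *alloc */
        *scan += offsetof(obj, ptrs) + o->n_ptrs * sizeof(obj *);
    }
}
#endif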
2415 nat saved_evac_gen = evac_gen;
2420 failed_to_evac = rtsFalse;
2422 /* scavenge phase - standard breadth-first scavenging of the
2426 while (bd != stp->hp_bd || p < stp->hp) {
2428 // If we're at the end of this block, move on to the next block
2429 if (bd != stp->hp_bd && p == bd->free) {
2435 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2436 info = get_itbl((StgClosure *)p);
2438 ASSERT(thunk_selector_depth == 0);
2441 switch (info->type) {
2444 /* treat MVars specially, because we don't want to evacuate the
2445 * mut_link field in the middle of the closure.
2448 StgMVar *mvar = ((StgMVar *)p);
2450 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2451 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2452 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2453 evac_gen = saved_evac_gen;
2454 recordMutable((StgMutClosure *)mvar);
2455 failed_to_evac = rtsFalse; // mutable.
2456 p += sizeofW(StgMVar);
2461 scavenge_fun_srt(info);
2462 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2463 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2464 p += sizeofW(StgHeader) + 2;
2468 scavenge_thunk_srt(info);
2470 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2471 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2472 p += sizeofW(StgHeader) + 2;
2476 scavenge_thunk_srt(info);
2477 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2478 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2482 scavenge_fun_srt(info);
2484 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2485 p += sizeofW(StgHeader) + 1;
2489 scavenge_thunk_srt(info);
2490 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2494 scavenge_fun_srt(info);
2496 p += sizeofW(StgHeader) + 1;
2500 scavenge_thunk_srt(info);
2501 p += sizeofW(StgHeader) + 2;
2505 scavenge_fun_srt(info);
2507 p += sizeofW(StgHeader) + 2;
2511 scavenge_thunk_srt(info);
2512 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2513 p += sizeofW(StgHeader) + 2;
2517 scavenge_fun_srt(info);
2519 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2520 p += sizeofW(StgHeader) + 2;
2524 scavenge_fun_srt(info);
2528 scavenge_thunk_srt(info);
2539 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2540 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2541 (StgClosure *)*p = evacuate((StgClosure *)*p);
2543 p += info->layout.payload.nptrs;
2548 StgBCO *bco = (StgBCO *)p;
2549 (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
2550 (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
2551 (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
2552 (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
2553 p += bco_sizeW(bco);
2558 if (stp->gen->no != 0) {
2561 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2562 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2563 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2566 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
2568 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2571 // We pretend that p has just been created.
2572 LDV_recordCreate((StgClosure *)p);
2576 case IND_OLDGEN_PERM:
2577 ((StgIndOldGen *)p)->indirectee =
2578 evacuate(((StgIndOldGen *)p)->indirectee);
2579 if (failed_to_evac) {
2580 failed_to_evac = rtsFalse;
2581 recordOldToNewPtrs((StgMutClosure *)p);
2583 p += sizeofW(StgIndOldGen);
2588 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2589 evac_gen = saved_evac_gen;
2590 recordMutable((StgMutClosure *)p);
2591 failed_to_evac = rtsFalse; // mutable anyhow
2592 p += sizeofW(StgMutVar);
2597 failed_to_evac = rtsFalse; // mutable anyhow
2598 p += sizeofW(StgMutVar);
2602 case SE_CAF_BLACKHOLE:
2605 p += BLACKHOLE_sizeW();
2610 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2611 (StgClosure *)bh->blocking_queue =
2612 evacuate((StgClosure *)bh->blocking_queue);
2613 recordMutable((StgMutClosure *)bh);
2614 failed_to_evac = rtsFalse;
2615 p += BLACKHOLE_sizeW();
2619 case THUNK_SELECTOR:
2621 StgSelector *s = (StgSelector *)p;
2622 s->selectee = evacuate(s->selectee);
2623 p += THUNK_SELECTOR_sizeW();
2627 // A chunk of stack saved in a heap object
2630 StgAP_STACK *ap = (StgAP_STACK *)p;
2632 ap->fun = evacuate(ap->fun);
2633 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2634 p = (StgPtr)ap->payload + ap->size;
2640 p = scavenge_PAP((StgPAP *)p);
2644 // nothing to follow
2645 p += arr_words_sizeW((StgArrWords *)p);
2649 // follow everything
2653 evac_gen = 0; // repeatedly mutable
2654 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2655 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2656 (StgClosure *)*p = evacuate((StgClosure *)*p);
2658 evac_gen = saved_evac_gen;
2659 recordMutable((StgMutClosure *)q);
2660 failed_to_evac = rtsFalse; // mutable anyhow.
2664 case MUT_ARR_PTRS_FROZEN:
2665 // follow everything
2669 // Set the mut_link field to NULL, so that we will put this
2670 // array back on the mutable list if it is subsequently thawed
2672 ((StgMutArrPtrs*)p)->mut_link = NULL;
2674 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2675 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2676 (StgClosure *)*p = evacuate((StgClosure *)*p);
2678 // it's tempting to recordMutable() if failed_to_evac is
2679 // false, but that breaks some assumptions (eg. every
2680 // closure on the mutable list is supposed to have the MUT
2681 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2687 StgTSO *tso = (StgTSO *)p;
2690 evac_gen = saved_evac_gen;
2691 recordMutable((StgMutClosure *)tso);
2692 failed_to_evac = rtsFalse; // mutable anyhow.
2693 p += tso_sizeW(tso);
2698 case RBH: // cf. BLACKHOLE_BQ
2701 nat size, ptrs, nonptrs, vhs;
2703 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2705 StgRBH *rbh = (StgRBH *)p;
2706 (StgClosure *)rbh->blocking_queue =
2707 evacuate((StgClosure *)rbh->blocking_queue);
2708 recordMutable((StgMutClosure *)rbh);
2709 failed_to_evac = rtsFalse; // mutable anyhow.
2711 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2712 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2713 // ToDo: use size of reverted closure here!
2714 p += BLACKHOLE_sizeW();
2720 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2721 // follow the pointer to the node which is being demanded
2722 (StgClosure *)bf->node =
2723 evacuate((StgClosure *)bf->node);
2724 // follow the link to the rest of the blocking queue
2725 (StgClosure *)bf->link =
2726 evacuate((StgClosure *)bf->link);
2727 if (failed_to_evac) {
2728 failed_to_evac = rtsFalse;
2729 recordMutable((StgMutClosure *)bf);
2732 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2733 bf, info_type((StgClosure *)bf),
2734 bf->node, info_type(bf->node)));
2735 p += sizeofW(StgBlockedFetch);
2743 p += sizeofW(StgFetchMe);
2744 break; // nothing to do in this case
2746 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2748 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2749 (StgClosure *)fmbq->blocking_queue =
2750 evacuate((StgClosure *)fmbq->blocking_queue);
2751 if (failed_to_evac) {
2752 failed_to_evac = rtsFalse;
2753 recordMutable((StgMutClosure *)fmbq);
2756 belch("@@ scavenge: %p (%s) exciting, isn't it",
2757 p, info_type((StgClosure *)p)));
2758 p += sizeofW(StgFetchMeBlockingQueue);
2764 barf("scavenge: unimplemented/strange closure type %d @ %p",
2768 /* If we didn't manage to promote all the objects pointed to by
2769 * the current object, then we have to designate this object as
2770 * mutable (because it contains old-to-new generation pointers).
2772 if (failed_to_evac) {
2773 failed_to_evac = rtsFalse;
2774 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2782 /* -----------------------------------------------------------------------------
2783 Scavenge everything on the mark stack.
2785 This is slightly different from scavenge():
2786 - we don't walk linearly through the objects, so the scavenger
2787 doesn't need to advance the pointer on to the next object.
2788 -------------------------------------------------------------------------- */
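/* The overall shape, with scavenge_object standing in for the big
 * switch below (it is not a real function in this file):
 *
 *     while (!mark_stack_empty()) {
 *         p = pop_mark_stack();
 *         scavenge_object(p);   // evacuating fields may push more
 *     }
 */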
2791 scavenge_mark_stack(void)
2797 evac_gen = oldest_gen->no;
2798 saved_evac_gen = evac_gen;
2801 while (!mark_stack_empty()) {
2802 p = pop_mark_stack();
2804 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2805 info = get_itbl((StgClosure *)p);
2808 switch (info->type) {
2811 /* treat MVars specially, because we don't want to evacuate the
2812 * mut_link field in the middle of the closure.
2815 StgMVar *mvar = ((StgMVar *)p);
2817 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2818 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2819 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2820 evac_gen = saved_evac_gen;
2821 failed_to_evac = rtsFalse; // mutable.
2826 scavenge_fun_srt(info);
2827 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2828 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2832 scavenge_thunk_srt(info);
2834 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2835 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2840 scavenge_fun_srt(info);
2841 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2846 scavenge_thunk_srt(info);
2849 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2854 scavenge_fun_srt(info);
2859 scavenge_thunk_srt(info);
2867 scavenge_fun_srt(info);
2871 scavenge_thunk_srt(info);
2882 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2883 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2884 (StgClosure *)*p = evacuate((StgClosure *)*p);
2890 StgBCO *bco = (StgBCO *)p;
2891 (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs);
2892 (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals);
2893 (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs);
2894 (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls);
2899 // don't need to do anything here: the only possible case
2900 // is that we're in a 1-space compacting collector, with
2901 // no "old" generation.
2905 case IND_OLDGEN_PERM:
2906 ((StgIndOldGen *)p)->indirectee =
2907 evacuate(((StgIndOldGen *)p)->indirectee);
2908 if (failed_to_evac) {
2909 recordOldToNewPtrs((StgMutClosure *)p);
2911 failed_to_evac = rtsFalse;
2916 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2917 evac_gen = saved_evac_gen;
2918 failed_to_evac = rtsFalse;
2923 failed_to_evac = rtsFalse;
2927 case SE_CAF_BLACKHOLE:
2935 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2936 (StgClosure *)bh->blocking_queue =
2937 evacuate((StgClosure *)bh->blocking_queue);
2938 failed_to_evac = rtsFalse;
2942 case THUNK_SELECTOR:
2944 StgSelector *s = (StgSelector *)p;
2945 s->selectee = evacuate(s->selectee);
2949 // A chunk of stack saved in a heap object
2952 StgAP_STACK *ap = (StgAP_STACK *)p;
2954 ap->fun = evacuate(ap->fun);
2955 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2961 scavenge_PAP((StgPAP *)p);
2965 // follow everything
2969 evac_gen = 0; // repeatedly mutable
2970 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2971 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2972 (StgClosure *)*p = evacuate((StgClosure *)*p);
2974 evac_gen = saved_evac_gen;
2975 failed_to_evac = rtsFalse; // mutable anyhow.
2979 case MUT_ARR_PTRS_FROZEN:
2980 // follow everything
2984 // Set the mut_link field to NULL, so that we will put this
2985 // array on the mutable list if it is subsequently thawed
2987 ((StgMutArrPtrs*)p)->mut_link = NULL;
2989 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2990 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2991 (StgClosure *)*p = evacuate((StgClosure *)*p);
2998 StgTSO *tso = (StgTSO *)p;
3001 evac_gen = saved_evac_gen;
3002 failed_to_evac = rtsFalse;
3007 case RBH: // cf. BLACKHOLE_BQ
3010 nat size, ptrs, nonptrs, vhs;
3012 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3014 StgRBH *rbh = (StgRBH *)p;
3015 (StgClosure *)rbh->blocking_queue =
3016 evacuate((StgClosure *)rbh->blocking_queue);
3017 recordMutable((StgMutClosure *)rbh);
3018 failed_to_evac = rtsFalse; // mutable anyhow.
3020 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3021 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3027 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3028 // follow the pointer to the node which is being demanded
3029 (StgClosure *)bf->node =
3030 evacuate((StgClosure *)bf->node);
3031 // follow the link to the rest of the blocking queue
3032 (StgClosure *)bf->link =
3033 evacuate((StgClosure *)bf->link);
3034 if (failed_to_evac) {
3035 failed_to_evac = rtsFalse;
3036 recordMutable((StgMutClosure *)bf);
3039 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
3040 bf, info_type((StgClosure *)bf),
3041 bf->node, info_type(bf->node)));
3049 break; // nothing to do in this case
3051 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3053 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3054 (StgClosure *)fmbq->blocking_queue =
3055 evacuate((StgClosure *)fmbq->blocking_queue);
3056 if (failed_to_evac) {
3057 failed_to_evac = rtsFalse;
3058 recordMutable((StgMutClosure *)fmbq);
3061 belch("@@ scavenge: %p (%s) exciting, isn't it",
3062 p, info_type((StgClosure *)p)));
3068 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3072 if (failed_to_evac) {
3073 failed_to_evac = rtsFalse;
3074 mkMutCons((StgClosure *)q, &generations[evac_gen]);
3077 // mark the next bit to indicate "scavenged"
3078 mark(q+1, Bdescr(q));
3080 } // while (!mark_stack_empty())
3082 // start a new linear scan if the mark stack overflowed at some point
3083 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3084 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
3085 mark_stack_overflowed = rtsFalse;
3086 oldgen_scan_bd = oldest_gen->steps[0].blocks;
3087 oldgen_scan = oldgen_scan_bd->start;
3090 if (oldgen_scan_bd) {
3091 // push a new thing on the mark stack
3093 // find a closure that is marked but not scavenged, and start
3095 while (oldgen_scan < oldgen_scan_bd->free
3096 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3100 if (oldgen_scan < oldgen_scan_bd->free) {
3102 // already scavenged?
3103 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3104 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3107 push_mark_stack(oldgen_scan);
3108 // ToDo: bump the linear scan by the actual size of the object
3109 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3113 oldgen_scan_bd = oldgen_scan_bd->link;
3114 if (oldgen_scan_bd != NULL) {
3115 oldgen_scan = oldgen_scan_bd->start;
3121 /* -----------------------------------------------------------------------------
3122 Scavenge one object.
3124 This is used for objects that are temporarily marked as mutable
3125 because they contain old-to-new generation pointers. Only certain
3126 objects can have this property.
3127 -------------------------------------------------------------------------- */
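/* Judging from the callers (scavenge_mut_once_list and scavenge_large
 * below), the result is rtsTrue when the object could not be fully
 * promoted -- i.e. it still holds old-to-new generation pointers and
 * must be kept on a mutable list -- and rtsFalse otherwise.
 */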
3130 scavenge_one(StgPtr p)
3132 const StgInfoTable *info;
3133 nat saved_evac_gen = evac_gen;
3136 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3137 info = get_itbl((StgClosure *)p);
3139 switch (info->type) {
3142 case FUN_1_0: // hardly worth specialising these guys
3162 case IND_OLDGEN_PERM:
3166 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3167 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3168 (StgClosure *)*q = evacuate((StgClosure *)*q);
3174 case SE_CAF_BLACKHOLE:
3179 case THUNK_SELECTOR:
3181 StgSelector *s = (StgSelector *)p;
3182 s->selectee = evacuate(s->selectee);
3187 // nothing to follow
3192 // follow everything
3195 evac_gen = 0; // repeatedly mutable
3196 recordMutable((StgMutClosure *)p);
3197 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3198 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3199 (StgClosure *)*p = evacuate((StgClosure *)*p);
3201 evac_gen = saved_evac_gen;
3202 failed_to_evac = rtsFalse;
3206 case MUT_ARR_PTRS_FROZEN:
3208 // follow everything
3211 // Set the mut_link field to NULL, so that we will put this
3212 // array on the mutable list if it is subsequently thawed
3214 ((StgMutArrPtrs*)p)->mut_link = NULL;
3216 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3217 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3218 (StgClosure *)*p = evacuate((StgClosure *)*p);
3225 StgTSO *tso = (StgTSO *)p;
3227 evac_gen = 0; // repeatedly mutable
3229 recordMutable((StgMutClosure *)tso);
3230 evac_gen = saved_evac_gen;
3231 failed_to_evac = rtsFalse;
3237 StgAP_STACK *ap = (StgAP_STACK *)p;
3239 ap->fun = evacuate(ap->fun);
3240 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3241 p = (StgPtr)ap->payload + ap->size;
3247 p = scavenge_PAP((StgPAP *)p);
3251 // This might happen if, for instance, a MUT_CONS was pointing to a
3252 // THUNK which has since been updated. The IND_OLDGEN will
3253 // be on the mutable list anyway, so we don't need to do anything
3254 // here.
3258 barf("scavenge_one: strange object %d", (int)(info->type));
3261 no_luck = failed_to_evac;
3262 failed_to_evac = rtsFalse;
3266 /* -----------------------------------------------------------------------------
3267 Scavenging mutable lists.
3269 We treat the mutable list of each generation > N (i.e. all the
3270 generations older than the one being collected) as roots. We also
3271 remove non-mutable objects from the mutable list at this point.
3272 -------------------------------------------------------------------------- */
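/* Both list walkers below follow the same pattern: run down the list
 * saved at the start of GC, scavenge each closure, and rebuild the
 * list out of only those closures that still need to be there.  A
 * standalone sketch with illustrative names (node, filter_mut_list
 * and scavenge_node are not RTS definitions):
 */
#if 0
typedef struct node { struct node *mut_link; } node;

static node *filter_mut_list(node *list, node *end,
                             int (*scavenge_node)(node *n))
{
    node *p, *next, *new_list = end;
    for (p = list; p != end; p = next) {
        next = p->mut_link;            /* read before relinking p */
        if (scavenge_node(p)) {        /* still holds old-to-new ptrs? */
            p->mut_link = new_list;    /* keep it: cons onto new list */
            new_list = p;
        }
    }
    return new_list;
}
#endif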
3275 scavenge_mut_once_list(generation *gen)
3277 const StgInfoTable *info;
3278 StgMutClosure *p, *next, *new_list;
3280 p = gen->mut_once_list;
3281 new_list = END_MUT_LIST;
3285 failed_to_evac = rtsFalse;
3287 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3289 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3292 if (info->type==RBH)
3293 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3295 switch(info->type) {
3298 case IND_OLDGEN_PERM:
3300 /* Try to pull the indirectee into this generation, so we can
3301 * remove the indirection from the mutable list.
3303 ((StgIndOldGen *)p)->indirectee =
3304 evacuate(((StgIndOldGen *)p)->indirectee);
3306 #if 0 && defined(DEBUG)
3307 if (RtsFlags.DebugFlags.gc)
3308 /* Debugging code to print out the size of the thing we just
3312 StgPtr start = gen->steps[0].scan;
3313 bdescr *start_bd = gen->steps[0].scan_bd;
3315 scavenge(&gen->steps[0]);
3316 if (start_bd != gen->steps[0].scan_bd) {
3317 size += (P_)BLOCK_ROUND_UP(start) - start;
3318 start_bd = start_bd->link;
3319 while (start_bd != gen->steps[0].scan_bd) {
3320 size += BLOCK_SIZE_W;
3321 start_bd = start_bd->link;
3323 size += gen->steps[0].scan -
3324 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3326 size = gen->steps[0].scan - start;
3328 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3332 /* failed_to_evac might happen if we've got more than two
3333 * generations, we're collecting only generation 0, the
3334 * indirection resides in generation 2 and the indirectee is
3335 * in generation 1.
3337 if (failed_to_evac) {
3338 failed_to_evac = rtsFalse;
3339 p->mut_link = new_list;
3342 /* the mut_link field of an IND_STATIC is overloaded as the
3343 * static link field too (it just so happens that we don't need
3344 * both at the same time), so we need to NULL it out when
3345 * removing this object from the mutable list because the static
3346 * link fields are all assumed to be NULL before doing a major
3354 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
3355 * it from the mutable list if possible by promoting whatever it
3356 * points to.
3358 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
3359 /* didn't manage to promote everything, so put the
3360 * MUT_CONS back on the list.
3362 p->mut_link = new_list;
3368 // shouldn't have anything else on the mutables list
3369 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
3373 gen->mut_once_list = new_list;
3378 scavenge_mutable_list(generation *gen)
3380 const StgInfoTable *info;
3381 StgMutClosure *p, *next;
3383 p = gen->saved_mut_list;
3387 failed_to_evac = rtsFalse;
3389 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3391 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3394 if (info->type==RBH)
3395 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3397 switch(info->type) {
3400 // follow everything
3401 p->mut_link = gen->mut_list;
3406 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3407 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3408 (StgClosure *)*q = evacuate((StgClosure *)*q);
3413 // Happens if a MUT_ARR_PTRS in the old generation is frozen
3414 case MUT_ARR_PTRS_FROZEN:
3419 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3420 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3421 (StgClosure *)*q = evacuate((StgClosure *)*q);
3424 // Set the mut_link field to NULL, so that we will put this
3425 // array back on the mutable list if it is subsequently thawed
3428 if (failed_to_evac) {
3429 failed_to_evac = rtsFalse;
3430 mkMutCons((StgClosure *)p, gen);
3436 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3437 p->mut_link = gen->mut_list;
3443 StgMVar *mvar = (StgMVar *)p;
3444 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
3445 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
3446 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
3447 p->mut_link = gen->mut_list;
3454 StgTSO *tso = (StgTSO *)p;
3458 /* Don't take this TSO off the mutable list - it might still
3459 * point to some younger objects (because we set evac_gen to 0
3460 * above).
3462 tso->mut_link = gen->mut_list;
3463 gen->mut_list = (StgMutClosure *)tso;
3469 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3470 (StgClosure *)bh->blocking_queue =
3471 evacuate((StgClosure *)bh->blocking_queue);
3472 p->mut_link = gen->mut_list;
3477 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3480 case IND_OLDGEN_PERM:
3481 /* Try to pull the indirectee into this generation, so we can
3482 * remove the indirection from the mutable list.
3485 ((StgIndOldGen *)p)->indirectee =
3486 evacuate(((StgIndOldGen *)p)->indirectee);
3489 if (failed_to_evac) {
3490 failed_to_evac = rtsFalse;
3491 p->mut_link = gen->mut_once_list;
3492 gen->mut_once_list = p;
3499 // HWL: check whether all of these are necessary
3501 case RBH: // cf. BLACKHOLE_BQ
3503 // nat size, ptrs, nonptrs, vhs;
3505 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3506 StgRBH *rbh = (StgRBH *)p;
3507 (StgClosure *)rbh->blocking_queue =
3508 evacuate((StgClosure *)rbh->blocking_queue);
3509 if (failed_to_evac) {
3510 failed_to_evac = rtsFalse;
3511 recordMutable((StgMutClosure *)rbh);
3513 // ToDo: use size of reverted closure here!
3514 p += BLACKHOLE_sizeW();
3520 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3521 // follow the pointer to the node which is being demanded
3522 (StgClosure *)bf->node =
3523 evacuate((StgClosure *)bf->node);
3524 // follow the link to the rest of the blocking queue
3525 (StgClosure *)bf->link =
3526 evacuate((StgClosure *)bf->link);
3527 if (failed_to_evac) {
3528 failed_to_evac = rtsFalse;
3529 recordMutable((StgMutClosure *)bf);
3531 p += sizeofW(StgBlockedFetch);
3537 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3540 p += sizeofW(StgFetchMe);
3541 break; // nothing to do in this case
3543 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3545 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3546 (StgClosure *)fmbq->blocking_queue =
3547 evacuate((StgClosure *)fmbq->blocking_queue);
3548 if (failed_to_evac) {
3549 failed_to_evac = rtsFalse;
3550 recordMutable((StgMutClosure *)fmbq);
3552 p += sizeofW(StgFetchMeBlockingQueue);
3558 // shouldn't have anything else on the mutables list
3559 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3566 scavenge_static(void)
3568 StgClosure* p = static_objects;
3569 const StgInfoTable *info;
3571 /* Always evacuate straight to the oldest generation for static
3572 * objects */
3573 evac_gen = oldest_gen->no;
3575 /* keep going until we've scavenged all the objects on the linked
3576 * list. */
3577 while (p != END_OF_STATIC_LIST) {
3579 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3582 if (info->type==RBH)
3583 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3585 // make sure the info pointer is into text space
3587 /* Take this object *off* the static_objects list,
3588 * and put it on the scavenged_static_objects list.
3590 static_objects = STATIC_LINK(info,p);
3591 STATIC_LINK(info,p) = scavenged_static_objects;
3592 scavenged_static_objects = p;
3594 switch (info -> type) {
3598 StgInd *ind = (StgInd *)p;
3599 ind->indirectee = evacuate(ind->indirectee);
3601 /* might fail to evacuate it, in which case we have to pop it
3602 * back on the mutable list (and take it off the
3603 * scavenged_static list because the static link and mut link
3604 * pointers are one and the same).
3606 if (failed_to_evac) {
3607 failed_to_evac = rtsFalse;
3608 scavenged_static_objects = IND_STATIC_LINK(p);
3609 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3610 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3616 scavenge_thunk_srt(info);
3620 scavenge_fun_srt(info);
3627 next = (P_)p->payload + info->layout.payload.ptrs;
3628 // evacuate the pointers
3629 for (q = (P_)p->payload; q < next; q++) {
3630 (StgClosure *)*q = evacuate((StgClosure *)*q);
3636 barf("scavenge_static: strange closure %d", (int)(info->type));
3639 ASSERT(failed_to_evac == rtsFalse);
3641 /* get the next static object from the list. Remember, there might
3642 * be more stuff on this list now that we've done some evacuating!
3643 * (static_objects is a global)
3649 /* -----------------------------------------------------------------------------
3650 scavenge a chunk of memory described by a bitmap
3651 -------------------------------------------------------------------------- */
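/* Bit i of a large bitmap lives in word i / BITS_IN(W_), at bit
 * position i % BITS_IN(W_) -- e.g. with 32-bit words, bit 40 is bit 8
 * of bitmap[1].  A standalone sketch of the lookup in plain C
 * (large_bitmap_bit is an illustrative name):
 */
#if 0
#include <limits.h>
#define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

static int large_bitmap_bit(const unsigned long *bitmap, unsigned int i)
{
    return (int)((bitmap[i / WORD_BITS] >> (i % WORD_BITS)) & 1);
}
#endif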
3654 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
3660 bitmap = large_bitmap->bitmap[b];
3661 for (i = 0; i < size; ) {
3662 if ((bitmap & 1) == 0) {
3663 (StgClosure *)*p = evacuate((StgClosure *)*p);
3667 if (i % BITS_IN(W_) == 0) {
3669 bitmap = large_bitmap->bitmap[b];
3671 bitmap = bitmap >> 1;
3676 static inline StgPtr
3677 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
3680 if ((bitmap & 1) == 0) {
3681 (StgClosure *)*p = evacuate((StgClosure *)*p);
3684 bitmap = bitmap >> 1;
3690 /* -----------------------------------------------------------------------------
3691 scavenge_stack walks over a section of stack and evacuates all the
3692 objects pointed to by it. We can use the same code for walking
3693 AP_STACK_UPDs, since these are just sections of copied stack.
3694 -------------------------------------------------------------------------- */
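/* Skeleton of the walk below (get_frame_info, scavenge_frame and
 * frame_size are stand-ins for the per-frame cases, not real
 * functions in this file):
 *
 *     while (p < stack_end) {
 *         info = get_frame_info(p);     // decode the activation record
 *         scavenge_frame(p, info);      // evacuate its pointer fields
 *         p += frame_size(p, info);     // step to the next frame
 *     }
 */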
3698 scavenge_stack(StgPtr p, StgPtr stack_end)
3700 const StgRetInfoTable* info;
3704 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3707 * Each time around this loop, we are looking at a chunk of stack
3708 * that starts with an activation record.
3711 while (p < stack_end) {
3712 info = get_ret_itbl((StgClosure *)p);
3714 switch (info->i.type) {
3717 ((StgUpdateFrame *)p)->updatee
3718 = evacuate(((StgUpdateFrame *)p)->updatee);
3719 p += sizeofW(StgUpdateFrame);
3722 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3727 bitmap = BITMAP_BITS(info->i.layout.bitmap);
3728 size = BITMAP_SIZE(info->i.layout.bitmap);
3729 // NOTE: the payload starts immediately after the info-ptr; we
3730 // don't have an StgHeader in the same sense as a heap closure.
3732 p = scavenge_small_bitmap(p, size, bitmap);
3735 scavenge_srt((StgClosure **)info->srt, info->i.srt_bitmap);
3743 (StgClosure *)*p = evacuate((StgClosure *)*p);
3746 size = BCO_BITMAP_SIZE(bco);
3747 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
3752 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3758 size = info->i.layout.large_bitmap->size;
3760 scavenge_large_bitmap(p, info->i.layout.large_bitmap, size);
3762 // and don't forget to follow the SRT
3766 // Dynamic bitmap: the mask is stored on the stack, and
3767 // there are a number of non-pointers followed by a number
3768 // of pointers above the bitmapped area. (see StgMacros.h,
3773 dyn = ((StgRetDyn *)p)->liveness;
3775 // traverse the bitmap first
3776 bitmap = GET_LIVENESS(dyn);
3777 p = (P_)&((StgRetDyn *)p)->payload[0];
3778 size = RET_DYN_BITMAP_SIZE;
3779 p = scavenge_small_bitmap(p, size, bitmap);
3781 // skip over the non-ptr words
3782 p += GET_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
3784 // follow the ptr words
3785 for (size = GET_PTRS(dyn); size > 0; size--) {
3786 (StgClosure *)*p = evacuate((StgClosure *)*p);
3794 StgRetFun *ret_fun = (StgRetFun *)p;
3795 StgFunInfoTable *fun_info;
3797 ret_fun->fun = evacuate(ret_fun->fun);
3798 fun_info = get_fun_itbl(ret_fun->fun);
3799 p = scavenge_arg_block(fun_info, ret_fun->payload);
3804 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
3809 /*-----------------------------------------------------------------------------
3810 scavenge the large object list.
3812 evac_gen set by caller; similar games played with evac_gen as with
3813 scavenge() - see comment at the top of scavenge(). Most large
3814 objects are (repeatedly) mutable, so most of the time evac_gen will
3815 be zero.
3816 --------------------------------------------------------------------------- */
3819 scavenge_large(step *stp)
3824 bd = stp->new_large_objects;
3826 for (; bd != NULL; bd = stp->new_large_objects) {
3828 /* take this object *off* the large objects list and put it on
3829 * the scavenged large objects list. This is so that we can
3830 * treat new_large_objects as a stack and push new objects on
3831 * the front when evacuating.
3833 stp->new_large_objects = bd->link;
3834 dbl_link_onto(bd, &stp->scavenged_large_objects);
3836 // update the block count in this step.
3837 stp->n_scavenged_large_blocks += bd->blocks;
3840 if (scavenge_one(p)) {
3841 mkMutCons((StgClosure *)p, stp->gen);
3846 /* -----------------------------------------------------------------------------
3847 Initialising the static object & mutable lists
3848 -------------------------------------------------------------------------- */
3851 zero_static_object_list(StgClosure* first_static)
3855 const StgInfoTable *info;
3857 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3859 link = STATIC_LINK(info, p);
3860 STATIC_LINK(info,p) = NULL;
3864 /* This function is only needed because we share the mutable link
3865 * field with the static link field in an IND_STATIC, so we have to
3866 * zero the mut_link field before doing a major GC, which needs the
3867 * static link field.
3869 * It doesn't do any harm to zero all the mutable link fields on the
3870 * mutable list.
3874 zero_mutable_list( StgMutClosure *first )
3876 StgMutClosure *next, *c;
3878 for (c = first; c != END_MUT_LIST; c = next) {
3884 /* -----------------------------------------------------------------------------
3885 Reverting CAFs
3886 -------------------------------------------------------------------------- */
3893 for (c = (StgIndStatic *)caf_list; c != NULL;
3894 c = (StgIndStatic *)c->static_link)
3896 c->header.info = c->saved_info;
3897 c->saved_info = NULL;
3898 // could, but not necessary: c->static_link = NULL;
3904 markCAFs( evac_fn evac )
3908 for (c = (StgIndStatic *)caf_list; c != NULL;
3909 c = (StgIndStatic *)c->static_link)
3911 evac(&c->indirectee);
3915 /* -----------------------------------------------------------------------------
3916 Sanity code for CAF garbage collection.
3918 With DEBUG turned on, we manage a CAF list in addition to the SRT
3919 mechanism. After GC, we run down the CAF list and blackhole any
3920 CAFs which have been garbage collected. This means we get an error
3921 whenever the program tries to enter a garbage collected CAF.
3923 Any garbage collected CAFs are taken off the CAF list at the same
3924 time.
3925 -------------------------------------------------------------------------- */
3927 #if 0 && defined(DEBUG)
3934 const StgInfoTable *info;
3945 ASSERT(info->type == IND_STATIC);
3947 if (STATIC_LINK(info,p) == NULL) {
3948 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3950 SET_INFO(p,&stg_BLACKHOLE_info);
3951 p = STATIC_LINK2(info,p);
3955 pp = &STATIC_LINK2(info,p);
3962 // belch("%d CAFs live", i);
3967 /* -----------------------------------------------------------------------------
3968 Lazy black holing.
3970 Whenever a thread returns to the scheduler after possibly doing
3971 some work, we have to run down the stack and black-hole all the
3972 closures referred to by update frames.
3973 -------------------------------------------------------------------------- */
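/* A self-contained sketch of the idea, under toy types (frame,
 * closure and the callbacks are illustrative, not RTS definitions):
 * walk the update frames from the top of the stack, black-holing each
 * updatee, and stop at the first already-black-holed updatee, because
 * an earlier pass must have dealt with everything below it.
 */
#if 0
struct closure;
typedef struct frame { int is_update; struct closure *updatee; } frame;

static void lazy_blackhole(frame **frames, int n_frames,
                           int (*is_blackhole)(struct closure *),
                           void (*blackhole)(struct closure *))
{
    int i;
    for (i = 0; i < n_frames; i++) {           /* index 0 = stack top */
        if (!frames[i]->is_update) continue;   /* only update frames */
        if (is_blackhole(frames[i]->updatee))
            break;                             /* older frames already done */
        blackhole(frames[i]->updatee);
    }
}
#endif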
3976 threadLazyBlackHole(StgTSO *tso)
3979 StgRetInfoTable *info;
3980 StgBlockingQueue *bh;
3983 stack_end = &tso->stack[tso->stack_size];
3985 frame = (StgClosure *)tso->sp;
3988 info = get_ret_itbl(frame);
3990 switch (info->i.type) {
3993 bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee;
3995 /* if the thunk is already blackholed, it means we've also
3996 * already blackholed the rest of the thunks on this stack,
3997 * so we can stop early.
3999 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
4000 * don't interfere with this optimisation.
4002 if (bh->header.info == &stg_BLACKHOLE_info) {
4006 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
4007 bh->header.info != &stg_CAF_BLACKHOLE_info) {
4008 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4009 belch("Unexpected lazy BHing required at %p", (void *)bh);
4013 // We pretend that bh is now dead.
4014 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4016 SET_INFO(bh,&stg_BLACKHOLE_info);
4019 // We pretend that bh has just been created.
4020 LDV_recordCreate(bh);
4024 frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
4030 // normal stack frames; do nothing except advance the pointer
4032 (StgPtr)frame += stack_frame_sizeW(frame);
4038 /* -----------------------------------------------------------------------------
4039 * Stack squeezing
4040 *
4041 * Code largely pinched from old RTS, then hacked to bits. We also do
4042 * lazy black holing here.
4044 * -------------------------------------------------------------------------- */
4046 struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };
4049 threadSqueezeStack(StgTSO *tso)
4052 rtsBool prev_was_update_frame;
4053 StgClosure *updatee = NULL;
4055 StgRetInfoTable *info;
4056 StgWord current_gap_size;
4057 struct stack_gap *gap;
4060 // Traverse the stack upwards, replacing adjacent update frames
4061 // with a single update frame and a "stack gap". A stack gap
4062 // contains two values: the size of the gap, and the distance
4063 // to the next gap (or the stack top).
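// For example, three adjacent update frames u1, u2, u3 (u1 youngest)
// collapse to just u1: the updatees of u2 and u3 are overwritten with
// indirections to u1's updatee, and the words u2 and u3 occupied
// become a single stack_gap record.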
4065 bottom = &(tso->stack[tso->stack_size]);
4069 ASSERT(frame < bottom);
4071 prev_was_update_frame = rtsFalse;
4072 current_gap_size = 0;
4073 gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));
4075 while (frame < bottom) {
4077 info = get_ret_itbl((StgClosure *)frame);
4078 switch (info->i.type) {
4082 StgUpdateFrame *upd = (StgUpdateFrame *)frame;
4084 if (upd->updatee->header.info == &stg_BLACKHOLE_info) {
4086 // found a BLACKHOLE'd update frame; we've been here
4087 // before, in a previous GC, so just break out.
4089 // Mark the end of the gap, if we're in one.
4090 if (current_gap_size != 0) {
4091 gap = (struct stack_gap *)(frame-sizeofW(StgUpdateFrame));
4094 frame += sizeofW(StgUpdateFrame);
4095 goto done_traversing;
4098 if (prev_was_update_frame) {
4100 TICK_UPD_SQUEEZED();
4101 /* wasn't there something about update squeezing and ticky to be
4102 * sorted out? oh yes: we aren't counting each enter properly
4103 * in this case. See the log somewhere. KSW 1999-04-21
4105 * Check two things: that the two update frames don't point to
4106 * the same object, and that the updatee_bypass isn't already an
4107 * indirection. Both of these cases only happen when we're in a
4108 * black hole-style loop (and there are multiple update frames
4109 * on the stack pointing to the same closure), but they can both
4110 * screw us up if we don't check.
4112 if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
4113 // this wakes the threads up
4114 UPD_IND_NOLOCK(upd->updatee, updatee);
4117 // now mark this update frame as a stack gap. The gap
4118 // marker resides in the bottom-most update frame of
4119 // the series of adjacent frames, and covers all the
4120 // frames in this series.
4121 current_gap_size += sizeofW(StgUpdateFrame);
4122 ((struct stack_gap *)frame)->gap_size = current_gap_size;
4123 ((struct stack_gap *)frame)->next_gap = gap;
4125 frame += sizeofW(StgUpdateFrame);
4129 // single update frame, or the topmost update frame in a series
4131 StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee;
4133 // Do lazy black-holing
4134 if (bh->header.info != &stg_BLACKHOLE_info &&
4135 bh->header.info != &stg_BLACKHOLE_BQ_info &&
4136 bh->header.info != &stg_CAF_BLACKHOLE_info) {
4137 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4138 belch("Unexpected lazy BHing required at %p", (void *)bh);
4141 /* zero out the slop so that the sanity checker can tell
4142 * where the next closure is.
4145 StgInfoTable *bh_info = get_itbl(bh);
4146 nat np = bh_info->layout.payload.ptrs,
4147 nw = bh_info->layout.payload.nptrs, i;
4148 /* don't zero out slop for a THUNK_SELECTOR,
4149 * because its layout info is used for a
4150 * different purpose, and it's exactly the
4151 * same size as a BLACKHOLE in any case.
4153 if (bh_info->type != THUNK_SELECTOR) {
4154 for (i = np; i < np + nw; i++) {
4155 ((StgClosure *)bh)->payload[i] = 0;
4161 // We pretend that bh is now dead.
4162 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4164 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
4165 SET_INFO(bh,&stg_BLACKHOLE_info);
4167 // We pretend that bh has just been created.
4168 LDV_recordCreate(bh);
4172 prev_was_update_frame = rtsTrue;
4173 updatee = upd->updatee;
4174 frame += sizeofW(StgUpdateFrame);
4180 prev_was_update_frame = rtsFalse;
4182 // we're not in a gap... check whether this is the end of a gap
4183 // (an update frame can't be the end of a gap).
4184 if (current_gap_size != 0) {
4185 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4187 current_gap_size = 0;
4189 frame += stack_frame_sizeW((StgClosure *)frame);
4196 // Now we have a stack with gaps in it, and we have to walk down
4197 // shoving the stack up to fill in the gaps. A diagram might
4198 // help:
4201 // | ********* | <- sp
4205 // | stack_gap | <- gap | chunk_size
4207 // | ......... | <- gap_end v
4213 // 'sp' points to the current top-of-stack
4214 // 'gap' points to the stack_gap structure inside the gap
4215 // ***** indicates real stack data
4216 // ..... indicates gap
4217 // <empty> indicates unused
4221 void *gap_start, *next_gap_start, *gap_end;
4224 next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
4225 sp = next_gap_start;
4227 while ((StgPtr)gap > tso->sp) {
4229 // we're working in *bytes* now...
4230 gap_start = next_gap_start;
4231 gap_end = gap_start - gap->gap_size * sizeof(W_);
4233 gap = gap->next_gap;
4234 next_gap_start = (void *)gap + sizeof(StgUpdateFrame);
4236 chunk_size = gap_end - next_gap_start;
4238 memmove(sp, next_gap_start, chunk_size);
4241 tso->sp = (StgPtr)sp;
4245 /* -----------------------------------------------------------------------------
4246 * Pausing a thread
4247 *
4248 * We have to prepare for GC - this means doing lazy black holing
4249 * here. We also take the opportunity to do stack squeezing if it's
4250 * profitable.
4251 * -------------------------------------------------------------------------- */
4253 threadPaused(StgTSO *tso)
4255 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
4256 threadSqueezeStack(tso); // does black holing too
4258 threadLazyBlackHole(tso);
4261 /* -----------------------------------------------------------------------------
4262 * Debugging
4263 * -------------------------------------------------------------------------- */
4267 printMutOnceList(generation *gen)
4269 StgMutClosure *p, *next;
4271 p = gen->mut_once_list;
4274 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
4275 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4276 fprintf(stderr, "%p (%s), ",
4277 p, info_type((StgClosure *)p));
4279 fputc('\n', stderr);
4283 printMutableList(generation *gen)
4285 StgMutClosure *p, *next;
4290 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
4291 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4292 fprintf(stderr, "%p (%s), ",
4293 p, info_type((StgClosure *)p));
4295 fputc('\n', stderr);
4298 static inline rtsBool
4299 maybeLarge(StgClosure *closure)
4301 StgInfoTable *info = get_itbl(closure);
4303 /* closure types that may be found on the new_large_objects list;
4304 see scavenge_large */
4305 return (info->type == MUT_ARR_PTRS ||
4306 info->type == MUT_ARR_PTRS_FROZEN ||
4307 info->type == TSO ||
4308 info->type == ARR_WORDS);