/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.140 2002/09/06 09:56:12 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "StoragePriv.h"
#include "SchedAPI.h" // for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h" // ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "LdvProfile.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
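/* For example (an illustrative sketch, mirroring the pattern used in
 * evacuate() below): consing a static closure q onto the list is
 *
 *     if (STATIC_LINK(info,q) == NULL) {    // q not yet on the list
 *         STATIC_LINK(info,q) = static_objects;
 *         static_objects = q;
 *     }
 *
 * and the list is terminated by END_OF_STATIC_LIST (the value '1'
 * mentioned above) rather than NULL.
 */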
static StgClosure* static_objects;      // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

StgWeak *old_weak_ptr_list; // also pending finaliser list

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
static lnat new_blocks;          // blocks allocated during this GC
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: sched_mutex

   -------------------------------------------------------------------------- */
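/* To make the rules above concrete, here is a sketch (illustrative only,
 * not code from this module; src_gen, src_step and dest are hypothetical
 * names) of choosing a destination for an object in generation src_gen,
 * step src_step, when collecting up to generation N:
 *
 *     if (src_step + 1 < generations[src_gen].n_steps) {
 *         // a higher step exists: age the object into it
 *         dest = &generations[src_gen].steps[src_step+1];
 *     } else if (src_gen < RtsFlags.GcFlags.generations - 1) {
 *         // last step of its generation: promote to the next one
 *         dest = &generations[src_gen+1].steps[0];
 *     } else {
 *         // oldest generation: stays in its own (only) step
 *         dest = &generations[src_gen].steps[src_step];
 *     }
 *
 * In the real code this choice is precomputed in each step's 'to' field,
 * which copy() below consults.
 */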
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
    bdescr *bd;
    step *stp;
    lnat live, allocated, collected = 0, copied = 0;
    lnat oldgen_saved_blocks = 0;
    nat g, s;
    int gen, st;
    rtsBool flag;

#ifdef PROFILING
    CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
    IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                       Now, Now));
#endif

    // tell the stats department that we've started a GC
    stat_startGC();

    // Init stats and print par specific (timing) info
    PAR_TICKY_PAR_START();

    // attribute any costs to CCS_GC
#ifdef PROFILING
    prev_CCS = CCCS;
    CCCS = CCS_GC;
#endif

    /* Approximate how much we allocated.
     * Todo: only when generating stats?
     */
    allocated = calcAllocated();

    /* Figure out which generation to collect
     */
    if (force_major_gc) {
        N = RtsFlags.GcFlags.generations - 1;
    } else {
        N = 0;
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            if (generations[g].steps[0].n_blocks +
                generations[g].steps[0].n_large_blocks
                >= generations[g].max_blocks) {
                N = g;
            }
        }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
    if (RtsFlags.GcFlags.frontpanel) {
        updateFrontPanelBeforeGC(N);
    }
#endif

    // check stack sanity *before* GC (ToDo: check all threads)
    // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
    IF_DEBUG(sanity, checkFreeListSanity());

    /* Initialise the static object lists
     */
    static_objects = END_OF_STATIC_LIST;
    scavenged_static_objects = END_OF_STATIC_LIST;

    /* zero the mutable list for the oldest generation (see comment by
     * zero_mutable_list below).
     */
    if (major_gc) {
        zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
    }

    /* Save the old to-space if we're doing a two-space collection
     */
    if (RtsFlags.GcFlags.generations == 1) {
        old_to_blocks = g0s0->to_blocks;
        g0s0->to_blocks = NULL;
    }

    /* Keep a count of how many new blocks we allocated during this GC
     * (used for resizing the allocation area, later).
     */
    new_blocks = 0;

    /* Initialise to-space in all the generations/steps that we're
     * collecting.
     */
    for (g = 0; g <= N; g++) {
        generations[g].mut_once_list = END_MUT_LIST;
        generations[g].mut_list = END_MUT_LIST;

        for (s = 0; s < generations[g].n_steps; s++) {

            // generation 0, step 0 doesn't need to-space
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }

            /* Get a free block for to-space.  Extra blocks will be chained on
             * as necessary.
             */
            bd = allocBlock();
            stp = &generations[g].steps[s];
            ASSERT(stp->gen_no == g);
            ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
            bd->gen_no = g;
            bd->step = stp;
            bd->link = NULL;
            bd->flags = BF_EVACUATED;   // it's a to-space block
            stp->hp = bd->start;
            stp->hpLim = stp->hp + BLOCK_SIZE_W;
            stp->hp_bd = bd;
            stp->to_blocks = bd;
            stp->n_to_blocks = 1;
            stp->scan = bd->start;
            stp->scan_bd = bd;
            stp->new_large_objects = NULL;
            stp->scavenged_large_objects = NULL;
            stp->n_scavenged_large_blocks = 0;
            new_blocks++;

            // mark the large objects as not evacuated yet
            for (bd = stp->large_objects; bd; bd = bd->link) {
                bd->flags = BF_LARGE;
            }

            // for a compacted step, we need to allocate the bitmap
            if (stp->is_compacted) {
                nat bitmap_size; // in bytes
                bdescr *bitmap_bdescr;
                StgPtr bitmap;

                bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
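                // One mark bit per word of heap: with 4-byte words and
                // 4096-byte blocks, for example, each block needs
                // 4096 / (4*8) = 128 bytes of bitmap.  (Illustrative
                // numbers only; BLOCK_SIZE and sizeof(W_) are
                // platform-dependent.)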
                if (bitmap_size > 0) {
                    bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
                                               / BLOCK_SIZE);
                    stp->bitmap = bitmap_bdescr;
                    bitmap = bitmap_bdescr->start;

                    IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
                                       bitmap_size, bitmap););

                    // don't forget to fill it with zeros!
                    memset(bitmap, 0, bitmap_size);

                    // for each block in this step, point to its bitmap from the
                    // block descriptor
                    for (bd=stp->blocks; bd != NULL; bd = bd->link) {
                        bd->u.bitmap = bitmap;
                        bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
                    }
                }
            }
        }
    }
    /* make sure the older generations have at least one block to
     * allocate into (this makes things easier for copy(), see below).
     */
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            stp = &generations[g].steps[s];
            if (stp->hp_bd == NULL) {
                ASSERT(stp->blocks == NULL);
                bd = allocBlock();
                bd->gen_no = g;
                bd->step = stp;
                bd->link = NULL;
                bd->flags = 0; // *not* a to-space block or a large object
                stp->hp = bd->start;
                stp->hpLim = stp->hp + BLOCK_SIZE_W;
                stp->hp_bd = bd;
                stp->blocks = bd;
                stp->n_blocks = 1;
                new_blocks++;
            }
            /* Set the scan pointer for older generations: remember we
             * still have to scavenge objects that have been promoted. */
            stp->scan = stp->hp;
            stp->scan_bd = stp->hp_bd;
            stp->to_blocks = NULL;
            stp->n_to_blocks = 0;
            stp->new_large_objects = NULL;
            stp->scavenged_large_objects = NULL;
            stp->n_scavenged_large_blocks = 0;
        }
    }
    /* Allocate a mark stack if we're doing a major collection.
     */
    if (major_gc) {
        mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
        mark_stack = (StgPtr *)mark_stack_bdescr->start;
        mark_sp = mark_stack;
        mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
    } else {
        mark_stack_bdescr = NULL;
    }
    /* -----------------------------------------------------------------------
     * follow all the roots that we know about:
     *   - mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also: do them in reverse generation order.  This is because we
     * often want to promote objects that are pointed to by older
     * generations early, so we don't have to repeatedly copy them.
     * Doing the generations in reverse order ensures that we don't end
     * up in the situation where we want to evac an object to gen 3 and
     * it has already been evaced to gen 2.
     * ----------------------------------------------------------------------- */

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        generations[g].saved_mut_list = generations[g].mut_list;
        generations[g].mut_list = END_MUT_LIST;
    }
    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        IF_PAR_DEBUG(verbose,
                     printMutOnceList(&generations[g]));
        scavenge_mut_once_list(&generations[g]);
        evac_gen = g;
        for (st = generations[g].n_steps-1; st >= 0; st--) {
            scavenge(&generations[g].steps[st]);
        }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        IF_PAR_DEBUG(verbose,
                     printMutableList(&generations[g]));
        scavenge_mutable_list(&generations[g]);
        evac_gen = g;
        for (st = generations[g].n_steps-1; st >= 0; st--) {
            scavenge(&generations[g].steps[st]);
        }
    }
    /* follow roots from the CAF list (used by GHCi)
     */

    /* follow all the roots that the application knows about.
     */
    evac_gen = 0;
    get_roots(mark_root);

    /* And don't forget to mark the TSO if we got here direct from
     * Haskell! */
    /* Not needed in a seq version?
     *  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
     */

#if defined(PAR)
    // Mark the entries in the GALA table of the parallel system
    markLocalGAs(major_gc);
    // Mark all entries on the list of pending fetches
    markPendingFetches(major_gc);
#endif

    /* Mark the weak pointer list, and prepare to detect dead weak
     * pointers.
     */
    mark_weak_ptr_list(&weak_ptr_list);
    old_weak_ptr_list = weak_ptr_list;
    weak_ptr_list = NULL;
    weak_stage = WeakPtrs;

    /* The all_threads list is like the weak_ptr_list.
     * See traverse_weak_ptr_list() for the details.
     */
    old_all_threads = all_threads;
    all_threads = END_TSO_QUEUE;
    resurrected_threads = END_TSO_QUEUE;

    /* Mark the stable pointer table.
     */
    markStablePtrTable(mark_root);

    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    /* -------------------------------------------------------------------------
     * Repeatedly scavenge all the areas we know about until there's no
     * more scavenging to be done.
     */
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        flag = rtsTrue;
    }

    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
            if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            stp = &generations[gen].steps[st];
            evac_gen = gen;
            if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
                scavenge(stp);
                flag = rtsTrue;
            }
            if (stp->new_large_objects != NULL) {
                scavenge_large(stp);
                flag = rtsTrue;
            }
        }
    }

    if (flag) { goto loop; }

    // must be last...  invariant is that everything is fully
    // scavenged at this point.
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
        goto loop;
    }
    /* Update the pointers from the "main thread" list - these are
     * treated as weak pointers because we want to allow a main thread
     * to get a BlockedOnDeadMVar exception in the same way as any other
     * thread.  Note that the threads should all have been retained by
     * GC by virtue of being on the all_threads list; we're just
     * updating pointers here.
     */
    {
        StgMainThread *m;
        StgTSO *tso;
        for (m = main_threads; m != NULL; m = m->link) {
            tso = (StgTSO *) isAlive((StgClosure *)m->tso);
            if (tso == NULL) {
                barf("main thread has been GC'd");
            }
            m->tso = tso;
        }
    }

#if defined(PAR)
    // Reconstruct the Global Address tables used in GUM
    rebuildGAtables(major_gc);
    IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
    // Now see which stable names are still alive.

    // Tidy the end of the to-space chains
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            stp = &generations[g].steps[s];
            if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
                stp->hp_bd->free = stp->hp;
                stp->hp_bd->link = NULL;
            }
        }
    }
#ifdef PROFILING
    // We call processHeapClosureForDead() on every closure destroyed during
    // the current garbage collection, so we invoke LdvCensusForDead().
    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
        || RtsFlags.ProfFlags.bioSelector != NULL)
        LdvCensusForDead(N);
#endif

    // NO MORE EVACUATION AFTER THIS POINT!
    // Finally: compaction of the oldest generation.
    if (major_gc && oldest_gen->steps[0].is_compacted) {
        // save number of blocks for stats
        oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
        compact(get_roots);
    }

    IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
    /* run through all the generations/steps and tidy up
     */
    copied = new_blocks * BLOCK_SIZE_W;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

        if (g <= N) {
            generations[g].collections++; // for stats
        }

        for (s = 0; s < generations[g].n_steps; s++) {
            bdescr *next;
            stp = &generations[g].steps[s];

            if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
                // stats information: how much we copied
                if (g <= N) {
                    copied -= stp->hp_bd->start + BLOCK_SIZE_W -
                        stp->hp_bd->free;
                }
            }

            // for generations we collected...
            if (g <= N) {

                // rough calculation of garbage collected, for stats output
                if (stp->is_compacted) {
                    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
                } else {
                    collected += stp->n_blocks * BLOCK_SIZE_W;
                }

                /* free old memory and shift to-space into from-space for all
                 * the collected steps (except the allocation area).  These
                 * freed blocks will probably be quickly recycled.
                 */
                if (!(g == 0 && s == 0)) {
                    if (stp->is_compacted) {
                        // for a compacted step, just shift the new to-space
                        // onto the front of the now-compacted existing blocks.
                        for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
                            bd->flags &= ~BF_EVACUATED; // now from-space
                        }
                        // tack the new blocks on the end of the existing blocks
                        if (stp->blocks == NULL) {
                            stp->blocks = stp->to_blocks;
                        } else {
                            for (bd = stp->blocks; bd != NULL; bd = next) {
                                next = bd->link;
                                if (next == NULL) {
                                    bd->link = stp->to_blocks;
                                }
                            }
                        }
                        // add the new blocks to the block tally
                        stp->n_blocks += stp->n_to_blocks;
                    } else {
                        freeChain(stp->blocks);
                        stp->blocks = stp->to_blocks;
                        stp->n_blocks = stp->n_to_blocks;
                        for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                            bd->flags &= ~BF_EVACUATED; // now from-space
                        }
                    }
                    stp->to_blocks = NULL;
                    stp->n_to_blocks = 0;
                }
                /* LARGE OBJECTS.  The current live large objects are chained on
                 * scavenged_large, having been moved during garbage
                 * collection from large_objects.  Any objects left on
                 * large_objects list are therefore dead, so we free them here.
                 */
                for (bd = stp->large_objects; bd != NULL; bd = next) {
                    next = bd->link;
                    freeGroup(bd);
                }

                // update the count of blocks used by large objects
                for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED;
                }
                stp->large_objects = stp->scavenged_large_objects;
                stp->n_large_blocks = stp->n_scavenged_large_blocks;

            } else { // for older generations...

                /* For older generations, we need to append the
                 * scavenged_large_object list (i.e. large objects that have been
                 * promoted during this GC) to the large_object list for that step.
                 */
                for (bd = stp->scavenged_large_objects; bd; bd = next) {
                    next = bd->link;
                    bd->flags &= ~BF_EVACUATED;
                    dbl_link_onto(bd, &stp->large_objects);
                }

                // add the new blocks we promoted during this GC
                stp->n_blocks += stp->n_to_blocks;
                stp->n_large_blocks += stp->n_scavenged_large_blocks;
            }
        }
    }
    /* Reset the sizes of the older generations when we do a major
     * collection.
     *
     * CURRENT STRATEGY: make all generations except zero the same size.
     * We have to stay within the maximum heap size, and leave a certain
     * percentage of the maximum heap size available to allocate into.
     */
    if (major_gc && RtsFlags.GcFlags.generations > 1) {
        nat live, size, min_alloc;
        nat max  = RtsFlags.GcFlags.maxHeapSize;
        nat gens = RtsFlags.GcFlags.generations;
        // live in the oldest generations
        live = oldest_gen->steps[0].n_blocks +
               oldest_gen->steps[0].n_large_blocks;

        // default max size for all generations except zero
        size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                       RtsFlags.GcFlags.minOldGenSize);

        // minimum size for generation zero
        min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                            RtsFlags.GcFlags.minAllocAreaSize);

        // Auto-enable compaction when the residency reaches a
        // certain percentage of the maximum heap size (default: 30%).
        if (RtsFlags.GcFlags.generations > 1 &&
            (RtsFlags.GcFlags.compact ||
             (max > 0 &&
              oldest_gen->steps[0].n_blocks >
              (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
            oldest_gen->steps[0].is_compacted = 1;
            // fprintf(stderr,"compaction: on\n");
        } else {
            oldest_gen->steps[0].is_compacted = 0;
            // fprintf(stderr,"compaction: off\n");
        }
        // if we're going to go over the maximum heap size, reduce the
        // size of the generations accordingly.  The calculation is
        // different if compaction is turned on, because we don't need
        // to double the space required to collect the old generation.
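        // Worked example (illustrative numbers only): with gens = 2 and
        // compaction off, the check below requires
        //     size*(gens-1)*2 + min_alloc <= max,
        // i.e. from-space plus to-space for the single old generation,
        // plus the allocation area; if that overflows, size is rescaled
        // to (max - min_alloc) / ((gens-1)*2).  With compaction on, the
        // oldest generation needs no separate to-space, hence the
        // smaller multiplier in the compacted case below.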
        if (max != 0) {

            // this test is necessary to ensure that the calculations
            // below don't have any negative results - we're working
            // with unsigned values here.
            if (max < min_alloc) {
                max = min_alloc;
            }

            if (oldest_gen->steps[0].is_compacted) {
                if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                    size = (max - min_alloc) / ((gens - 1) * 2 - 1);
                }
            } else {
                if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                    size = (max - min_alloc) / ((gens - 1) * 2);
                }
            }
        }

#if 0
        fprintf(stderr,"live: %d, min_alloc: %d, size: %d, max: %d\n", live,
                min_alloc, size, max);
#endif

        for (g = 0; g < gens; g++) {
            generations[g].max_blocks = size;
        }
    }
    // Guess the amount of live data for stats.
    live = calcLive();

    /* Free the small objects allocated via allocate(), since this will
     * all have been copied into G0S1 now.
     */
    if (small_alloc_list != NULL) {
        freeChain(small_alloc_list);
    }
    small_alloc_list = NULL;
    alloc_blocks = 0;
    alloc_Hp = NULL;
    alloc_HpLim = NULL;
    alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

    // Start a new pinned_object_block
    pinned_object_block = NULL;
    /* Free the mark stack.
     */
    if (mark_stack_bdescr != NULL) {
        freeGroup(mark_stack_bdescr);
    }

    // Free any bitmaps.
    for (g = 0; g <= N; g++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            stp = &generations[g].steps[s];
            if (stp->is_compacted && stp->bitmap != NULL) {
                freeGroup(stp->bitmap);
            }
        }
    }
    /* Two-space collector:
     * Free the old to-space, and estimate the amount of live data.
     */
    if (RtsFlags.GcFlags.generations == 1) {
        nat blocks;

        if (old_to_blocks != NULL) {
            freeChain(old_to_blocks);
        }
        for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
            bd->flags = 0; // now from-space
        }
        /* For a two-space collector, we need to resize the nursery. */

        /* set up a new nursery.  Allocate a nursery size based on a
         * function of the amount of live data (by default a factor of 2).
         * Use the blocks from the old nursery if possible, freeing up any
         * left over blocks.
         *
         * If we get near the maximum heap size, then adjust our nursery
         * size accordingly.  If the nursery is the same size as the live
         * data (L), then we need 3L bytes.  We can reduce the size of the
         * nursery to bring the required memory down near 2L bytes.
         *
         * A normal 2-space collector would need 4L bytes to give the same
         * performance we get from 3L bytes, reducing to the same
         * performance at 2L bytes.
         */
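        /* Concretely (illustrative arithmetic only): with L live blocks
         * and an L-block nursery, the peak is nursery + from-space +
         * to-space = 3L; shrinking the nursery towards zero brings the
         * peak towards from-space + to-space = 2L, which is what the
         * adjustment below aims for as we approach maxHeapSize.
         */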
        blocks = g0s0->n_to_blocks;

        if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
             blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
             RtsFlags.GcFlags.maxHeapSize ) {
            long adjusted_blocks; // signed on purpose
            int pc_free;

            adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
            IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
            pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
            if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
                heapOverflow();
            }
            blocks = adjusted_blocks;
        } else {
            blocks *= RtsFlags.GcFlags.oldGenFactor;
            if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
                blocks = RtsFlags.GcFlags.minAllocAreaSize;
            }
        }
        resizeNursery(blocks);
    } else {
        /* Generational collector:
         * If the user has given us a suggested heap size, adjust our
         * allocation area to make best use of the memory available.
         */
        if (RtsFlags.GcFlags.heapSizeSuggestion) {
            long blocks;
            nat needed = calcNeeded(); // approx blocks needed at next GC

            /* Guess how much will be live in generation 0 step 0 next time.
             * A good approximation is obtained by finding the
             * percentage of g0s0 that was live at the last minor GC.
             */
            if (N == 0) {
                g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
            }
            /* Estimate a size for the allocation area based on the
             * information available.  We might end up going slightly under
             * or over the suggested heap size, but we should be pretty
             * close on average.
             *
             * Formula:            suggested - needed
             *                ----------------------------
             *                    1 + g0s0_pcnt_kept/100
             *
             * where 'needed' is the amount of memory needed at the next
             * collection for collecting all steps except g0s0.
             */
            blocks =
                (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
                (100 + (long)g0s0_pcnt_kept);
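            /* Worked example (illustrative numbers only): with a suggestion
             * of 1000 blocks, needed = 200 and g0s0_pcnt_kept = 30, this
             * gives blocks = (1000 - 200) * 100 / 130 ~= 615; the ~615
             * nursery blocks plus the ~30% of them that survive plus the
             * 200 blocks needed elsewhere lands close to the 1000 suggested.
             */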
            if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
                blocks = RtsFlags.GcFlags.minAllocAreaSize;
            }

            resizeNursery((nat)blocks);

        } else {
            // we might have added extra large blocks to the nursery, so
            // resize back to minAllocAreaSize again.
            resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
        }
    }
    // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
    if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
    // resetStaticObjectForRetainerProfiling() must be called before
    // zeroing below.
    resetStaticObjectForRetainerProfiling();
#endif

    // zero the scavenged static object list
    if (major_gc) {
        zero_static_object_list(scavenged_static_objects);
    }
    RELEASE_LOCK(&sched_mutex);

    // start any pending finalizers
    scheduleFinalizers(old_weak_ptr_list);

    // send exceptions to any threads which were about to die
    resurrectThreads(resurrected_threads);

    ACQUIRE_LOCK(&sched_mutex);

    // Update the stable pointer hash table.
    updateStablePtrTable(major_gc);

    // check sanity after GC
    IF_DEBUG(sanity, checkSanity());

    // extra GC trace info
    IF_DEBUG(gc, statDescribeGens());
    // symbol-table based profiling
    /*  heapCensus(to_blocks); */ /* ToDo */

    // restore enclosing cost centre
#ifdef PROFILING
    CCCS = prev_CCS;
#endif

    // check for memory leaks if sanity checking is on
    IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
    if (RtsFlags.GcFlags.frontpanel) {
        updateFrontPanelAfterGC( N, live );
    }
#endif

    // ok, GC over: tell the stats department what happened.
    stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
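/* A sketch of how the collector drives this function (cf. the main
 * scavenge loop in GarbageCollect() above); scavenge_everything() is a
 * hypothetical stand-in for the "scavenge all areas" loop there:
 *
 *     weak_stage = WeakPtrs;
 *     do {
 *         scavenge_everything();   // reach a fixpoint: no pending work
 *     } while (traverse_weak_ptr_list());
 */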
static rtsBool
traverse_weak_ptr_list(void)
{
    StgWeak *w, **last_w, *next_w;
    StgClosure *new;
    rtsBool flag = rtsFalse;

    switch (weak_stage) {

    case WeakDone:
        return rtsFalse;

    case WeakPtrs:
        /* doesn't matter where we evacuate values/finalizers to, since
         * these pointers are treated as roots (iff the keys are alive).
         */
        evac_gen = 0;

        last_w = &old_weak_ptr_list;
        for (w = old_weak_ptr_list; w != NULL; w = next_w) {

            /* There might be a DEAD_WEAK on the list if finalizeWeak# was
             * called on a live weak pointer object.  Just remove it.
             */
            if (w->header.info == &stg_DEAD_WEAK_info) {
                next_w = ((StgDeadWeak *)w)->link;
                *last_w = next_w;
                continue;
            }

            switch (get_itbl(w)->type) {

            case EVACUATED:
                next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
                *last_w = next_w;
                continue;

            case WEAK:
                /* Now, check whether the key is reachable.
                 */
                new = isAlive(w->key);
                if (new != NULL) {
                    // evacuate the value and finalizer
                    w->value = evacuate(w->value);
                    w->finalizer = evacuate(w->finalizer);
                    // remove this weak ptr from the old_weak_ptr list
                    *last_w = w->link;
                    next_w = w->link;
                    // and put it on the new weak ptr list
                    w->link = weak_ptr_list;
                    weak_ptr_list = w;
                    flag = rtsTrue;
                    IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
                                         w, w->key));
                    continue;
                } else {
                    last_w = &(w->link);
                    next_w = w->link;
                    continue;
                }

            default:
                barf("traverse_weak_ptr_list: not WEAK");
            }
        }
        /* If we didn't make any changes, then we can go round and kill all
         * the dead weak pointers.  The old_weak_ptr list is used as a list
         * of pending finalizers later on.
         */
        if (flag == rtsFalse) {
            for (w = old_weak_ptr_list; w; w = w->link) {
                w->finalizer = evacuate(w->finalizer);
            }

            // Next, move to the WeakThreads stage after fully
            // scavenging the finalizers we've just evacuated.
            weak_stage = WeakThreads;
        }

        return rtsTrue;

    case WeakThreads:
        /* Now deal with the all_threads list, which behaves somewhat like
         * the weak ptr list.  If we discover any threads that are about to
         * become garbage, we wake them up and administer an exception.
         */
        {
            StgTSO *t, *tmp, *next, **prev;

            prev = &old_all_threads;
            for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

                (StgClosure *)tmp = isAlive((StgClosure *)t);

                if (tmp != NULL) {
                    t = tmp;
                }

                ASSERT(get_itbl(t)->type == TSO);
                switch (t->what_next) {
                case ThreadRelocated:
                    next = t->link;
                    *prev = next;
                    continue;
                case ThreadKilled:
                case ThreadComplete:
                    // finished or died.  The thread might still be alive, but we
                    // don't keep it on the all_threads list.  Don't forget to
                    // stub out its global_link field.
                    next = t->global_link;
                    t->global_link = END_TSO_QUEUE;
                    *prev = next;
                    continue;
                default:
                    ;
                }

                if (tmp == NULL) {
                    // not alive (yet): leave this thread on the
                    // old_all_threads list.
                    prev = &(t->global_link);
                    next = t->global_link;
                } else {
                    // alive: move this thread onto the all_threads list.
                    next = t->global_link;
                    t->global_link = all_threads;
                    all_threads = t;
                    *prev = next;
                }
            }
        }
        /* And resurrect any threads which were about to become garbage.
         */
        {
            StgTSO *t, *tmp, *next;
            for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
                next = t->global_link;
                (StgClosure *)tmp = evacuate((StgClosure *)t);
                tmp->global_link = resurrected_threads;
                resurrected_threads = tmp;
            }
        }

        weak_stage = WeakDone; // *now* we're done,
        return rtsTrue;        // but one more round of scavenging, please

    default:
        barf("traverse_weak_ptr_list");
        return rtsTrue;
    }
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
    StgWeak *w, **last_w;

    last_w = list;
    for (w = *list; w; w = w->link) {
        // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
        ASSERT(w->header.info == &stg_DEAD_WEAK_info
               || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
        (StgClosure *)w = evacuate((StgClosure *)w);
        *last_w = w;
        last_w = &(w->link);
    }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
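/* Typical use (a sketch of the pattern used on weak pointer keys in
 * traverse_weak_ptr_list() above):
 *
 *     StgClosure *new = isAlive(w->key);
 *     if (new != NULL) {
 *         // key reachable: keep the weak pointer, and use 'new' as
 *         // the key's post-GC address from now on
 *     } else {
 *         // key dead: schedule the finalizer
 *     }
 */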
StgClosure *
isAlive(StgClosure *p)
{
    const StgInfoTable *info;
    bdescr *bd;

    while (1) {
        info = get_itbl(p);

        /* ToDo: for static closures, check the static link field.
         * Problem here is that we sometimes don't set the link field, eg.
         * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
         */

        bd = Bdescr((P_)p);
        // ignore closures in generations that we're not collecting.
        if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
            return p;
        }
        // large objects have an evacuated flag
        if (bd->flags & BF_LARGE) {
            return (bd->flags & BF_EVACUATED) ? p : NULL;
        }
        // check the mark bit for compacted steps
        if (bd->step->is_compacted && is_marked((P_)p,bd)) {
            return p;
        }

        switch (info->type) {

        case IND:
        case IND_STATIC:
        case IND_PERM:
        case IND_OLDGEN:      // rely on compatible layout with StgInd
        case IND_OLDGEN_PERM:
            // follow indirections
            p = ((StgInd *)p)->indirectee;
            continue;

        case EVACUATED:
            // alive!
            return ((StgEvacuated *)p)->evacuee;

        case TSO:
            if (((StgTSO *)p)->what_next == ThreadRelocated) {
                p = (StgClosure *)((StgTSO *)p)->link;
                continue;
            }
            return NULL;

        default:
            // dead
            return NULL;
        }
    }
}
static void
mark_root(StgClosure **root)
{
    *root = evacuate(*root);
}

static void
addBlock(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;

    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    stp->hp_bd->free = stp->hp;
    stp->hp_bd->link = bd;
    stp->hp = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;
    stp->hp_bd = bd;

    stp->n_to_blocks++;
    new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    p->header.info = &stg_EVACUATED_info;
    ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
    P_ to, from, dest;
#ifdef PROFILING
    // @LDV profiling
    nat size_org = size;
#endif

    TICK_GC_WORDS_COPIED(size);
    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
        failed_to_evac = rtsTrue;
#else
        stp = &generations[evac_gen].steps[0];
#endif
    }

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    if (stp->hp + size >= stp->hpLim) {
        addBlock(stp);
    }

    for(to = stp->hp, from = (P_)src; size>0; --size) {
        *to++ = *from++;
    }

    dest = stp->hp;
    stp->hp = to;
    upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
    return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    P_ dest, to, from;
#ifdef PROFILING
    // @LDV profiling
    nat size_to_copy_org = size_to_copy;
#endif

    TICK_GC_WORDS_COPIED(size_to_copy);
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
        failed_to_evac = rtsTrue;
#else
        stp = &generations[evac_gen].steps[0];
#endif
    }

    if (stp->hp + size_to_reserve >= stp->hpLim) {
        addBlock(stp);
    }

    for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
        *to++ = *from++;
    }

    dest = stp->hp;
    stp->hp += size_to_reserve;
    upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    // size_to_copy_org is wrong because the closure already occupies size_to_reserve
    // words.
    SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy_org > 0)
        FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
    return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    step *stp;

    // object must be at the beginning of the block (or be a ByteArray)
    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
           (((W_)p & BLOCK_MASK) == 0));

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (bd->gen_no < evac_gen) {
            failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }

    stp = bd->step;
    // remove from large_object list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        stp->large_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated large object list of the destination step
     */
    stp = bd->step->to;
    if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
        failed_to_evac = rtsTrue;
#else
        stp = &generations[evac_gen].steps[0];
#endif
    }

    bd->step = stp;
    bd->gen_no = stp->gen_no;
    bd->link = stp->new_large_objects;
    stp->new_large_objects = bd;
    bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
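/* Usage sketch (mirroring the call in scavenge() below): after scavenging
 * an object q, a set failed_to_evac flag means q still points into a
 * younger generation, so we record it:
 *
 *     if (failed_to_evac) {
 *         failed_to_evac = rtsFalse;
 *         mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *     }
 */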
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
    StgMutVar *q;
    step *stp;

    stp = &gen->steps[0];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
        addBlock(stp);
    }

    q = (StgMutVar *)stp->hp;
    stp->hp += sizeofW(StgMutVar);

    SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
    q->var = ptr;
    recordOldToNewPtrs((StgMutClosure *)q);

    return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
    StgClosure *to;
    bdescr *bd = NULL;
    step *stp;
    const StgInfoTable *info;

loop:
    if (HEAP_ALLOCED(q)) {
        bd = Bdescr((P_)q);

        if (bd->gen_no > N) {
            /* Can't evacuate this object, because it's in a generation
             * older than the ones we're collecting.  Let's hope that it's
             * in evac_gen or older, or we will have to arrange to track
             * this pointer using the mutable list.
             */
            if (bd->gen_no < evac_gen) {
                // nope
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return q;
        }

        /* evacuate large objects by re-linking them onto a different list.
         */
        if (bd->flags & BF_LARGE) {
            info = get_itbl(q);
            if (info->type == TSO &&
                ((StgTSO *)q)->what_next == ThreadRelocated) {
                q = (StgClosure *)((StgTSO *)q)->link;
                goto loop;
            }
            evacuate_large((P_)q);
            return q;
        }

        /* If the object is in a step that we're compacting, then we
         * need to use an alternative evacuate procedure.
         */
        if (bd->step->is_compacted) {
            if (!is_marked((P_)q,bd)) {
                mark((P_)q,bd);
                if (mark_stack_full()) {
                    mark_stack_overflowed = rtsTrue;
                    reset_mark_stack();
                }
                push_mark_stack((P_)q);
            }
            return q;
        }

        stp = bd->step->to;
    }
    else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong

    // make sure the info pointer is into text space
    ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
    info = get_itbl(q);

    switch (info->type) {

    case MUT_VAR:
    case MVAR:
        to = copy(q,sizeW_fromITBL(info),stp);
        recordMutable((StgMutClosure *)to);
        return to;

    case CONSTR_0_1:
    {
        StgWord w = (StgWord)q->payload[0];
        if (q->header.info == Czh_con_info &&
            // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
            (StgChar)w <= MAX_CHARLIKE) {
            return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
        }
        if (q->header.info == Izh_con_info &&
            (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
            return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
        }
        // else, fall through ...
    }

    case FUN_1_0:
    case FUN_0_1:
    case CONSTR_1_0:
        return copy(q,sizeofW(StgHeader)+1,stp);
    case THUNK_1_0: // here because of MIN_UPD_SIZE
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
        if (bd->gen_no == 0 &&
            bd->step->no != 0 &&
            bd->step->no == generations[bd->gen_no].n_steps-1) {
            stp = bd->step;
        }
#endif
        return copy(q,sizeofW(StgHeader)+2,stp);

    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
        return copy(q,sizeofW(StgHeader)+2,stp);

    case FUN:
    case THUNK:
        return copy(q,sizeW_fromITBL(info),stp);

    case IND_PERM:
    case IND_OLDGEN_PERM:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
        return copy(q,sizeW_fromITBL(info),stp);

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

    case BLACKHOLE_BQ:
        to = copy(q,BLACKHOLE_sizeW(),stp);
        recordMutable((StgMutClosure *)to);
        return to;

    case THUNK_SELECTOR:
    {
        StgClosure *p;

        if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        }

        p = eval_thunk_selector(info->layout.selector_offset,
                                (StgSelector *)q);

        if (p == NULL) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        } else {
            // q is still BLACKHOLE'd.
            thunk_selector_depth++;
            p = evacuate(p);
            thunk_selector_depth--;
            upd_evacuee(q,p);
            return p;
        }
    }
    case IND:
    case IND_OLDGEN:
        // follow chains of indirections, don't evacuate them
        q = ((StgInd*)q)->indirectee;
        goto loop;

    case THUNK_STATIC:
        if (info->srt_len > 0 && major_gc &&
            THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
            THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
            static_objects = (StgClosure *)q;
        }
        return q;

    case FUN_STATIC:
        if (info->srt_len > 0 && major_gc &&
            FUN_STATIC_LINK((StgClosure *)q) == NULL) {
            FUN_STATIC_LINK((StgClosure *)q) = static_objects;
            static_objects = (StgClosure *)q;
        }
        return q;

    case IND_STATIC:
        /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
         * on the CAF list, so don't do anything with it here (we'll
         * scavenge it later).
         */
        if (major_gc
            && ((StgIndStatic *)q)->saved_info == NULL
            && IND_STATIC_LINK((StgClosure *)q) == NULL) {
            IND_STATIC_LINK((StgClosure *)q) = static_objects;
            static_objects = (StgClosure *)q;
        }
        return q;

    case CONSTR_STATIC:
        if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
            STATIC_LINK(info,(StgClosure *)q) = static_objects;
            static_objects = (StgClosure *)q;
        }
        return q;

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_NOCAF_STATIC:
        /* no need to put these on the static linked list, they don't need
         * to be scavenged.
         */
        return q;
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
        // shouldn't see these
        barf("evacuate: stack frame at %p\n", q);

    case AP_UPD:
    case PAP:
        /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
         * of stack, tagging and all.
         */
        return copy(q,pap_sizeW((StgPAP*)q),stp);
    case EVACUATED:
        /* Already evacuated, just return the forwarding address.
         * HOWEVER: if the requested destination generation (evac_gen) is
         * older than the actual generation (because the object was
         * already evacuated to a younger generation) then we have to
         * set the failed_to_evac flag to indicate that we couldn't
         * manage to promote the object to the desired generation.
         */
        if (evac_gen > 0) { // optimisation
            StgClosure *p = ((StgEvacuated*)q)->evacuee;
            if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return ((StgEvacuated*)q)->evacuee;

    case ARR_WORDS:
        // just copy the block
        return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
        // just copy the block
        return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

    case TSO:
    {
        StgTSO *tso = (StgTSO *)q;

        /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
         */
        if (tso->what_next == ThreadRelocated) {
            q = (StgClosure *)tso->link;
            goto loop;
        }

        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        {
            StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
            move_TSO(tso, new_tso);
            return (StgClosure *)new_tso;
        }
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
        //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
        to = copy(q,BLACKHOLE_sizeW(),stp);
        //ToDo: derive size etc from reverted IP
        //to = copy(q,size,stp);
        IF_DEBUG(gc,
                 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                       q, info_type(q), to, info_type(to)));
        return to;
    }

    case BLOCKED_FETCH:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
        to = copy(q,sizeofW(StgBlockedFetch),stp);
        IF_DEBUG(gc,
                 belch("@@ evacuate: %p (%s) to %p (%s)",
                       q, info_type(q), to, info_type(to)));
        return to;

    case FETCH_ME:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMe),stp);
        IF_DEBUG(gc,
                 belch("@@ evacuate: %p (%s) to %p (%s)",
                       q, info_type(q), to, info_type(to)));
        return to;

    case FETCH_ME_BQ:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
        IF_DEBUG(gc,
                 belch("@@ evacuate: %p (%s) to %p (%s)",
                       q, info_type(q), to, info_type(to)));
        return to;
#endif

    default:
        barf("evacuate: strange closure type %d", (int)(info->type));
    }

    barf("evacuate");
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.
   -------------------------------------------------------------------------- */
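/* For intuition (an illustrative note, not code from this module): a
 * THUNK_SELECTOR is a suspended field selection such as Haskell's
 *
 *     fst p    -- selector_offset 0 applied to a pair
 *
 * If the selectee is already a constructor, the thunk can be collapsed
 * here to selectee->payload[selector_offset] without copying the thunk.
 */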
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;

    selectee = p->selectee;

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);

selector_loop:
    info = get_itbl(selectee);
    switch (info->type) {
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
        // check that the size is in range
        ASSERT(field <  (StgWord32)(info->layout.payload.ptrs +
                                    info->layout.payload.nptrs));

        return selectee->payload[field];

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;
        goto selector_loop;

    case EVACUATED:
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        break;

    case THUNK_SELECTOR:
    {
        StgClosure *val;

        // check that we don't recurse too much, re-using the
        // depth bound also used in evacuate().
        thunk_selector_depth++;
        if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
            thunk_selector_depth--;
            break;
        }

        val = eval_thunk_selector(info->layout.selector_offset,
                                  (StgSelector *)selectee);

        thunk_selector_depth--;

        if (val == NULL) {
            break;
        } else {
            // we evaluated this selector thunk, so update it with
            // an indirection.
            UPD_IND_NOLOCK(selectee, val);
            selectee = val;
            goto selector_loop;
        }
    }

    case AP_UPD:
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_STATIC:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    case BLACKHOLE_BQ:
        // not evaluated yet
        break;

    default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

    // We didn't manage to evaluate this thunk; restore the old info pointer
    SET_INFO(p, info_ptr);
    return NULL;
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
    StgUpdateFrame *su;
    StgCatchFrame  *cf;
    StgSeqFrame    *sf;

    su = dest->su;

    while ((P_)su < dest->stack + dest->stack_size) {
        switch (get_itbl(su)->type) {

        // GCC actually manages to common up these three cases!

        case UPDATE_FRAME:
            su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
            su = su->link;
            continue;

        case CATCH_FRAME:
            cf = (StgCatchFrame *)su;
            cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
            su = cf->link;
            continue;

        case SEQ_FRAME:
            sf = (StgSeqFrame *)su;
            sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
            su = sf->link;
            continue;

        case STOP_FRAME:
            // all done!
            break;

        default:
            barf("relocate_stack %d", (int)(get_itbl(su)->type));
        }
        break;
    }

    return dest;
}
static void
scavenge_srt(const StgInfoTable *info)
{
    StgClosure **srt, **srt_end;

    /* evacuate the SRT.  If srt_len is zero, then there isn't an
     * srt field in the info table.  That's ok, because we'll
     * never dereference it.
     */
    srt = (StgClosure **)(info->srt);
    srt_end = srt + info->srt_len;
    for (; srt < srt_end; srt++) {
        /* Special-case to handle references to closures hiding out in DLLs, since
           double indirections are required to get at those.  The code generator knows
           which is which when generating the SRT, so it stores the (indirect)
           reference to the DLL closure in the table by first adding one to it.
           We check for this here, and undo the addition before evacuating it.

           If the SRT entry hasn't got bit 0 set, the SRT entry points to a
           closure that's fixed at link-time, and no extra magic is required.
        */
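        /* For example (illustrative addresses only): an SRT entry holding
           0x50001 has bit 0 set, so it really means "the closure pointer is
           stored at 0x50000" - untag, then dereference once more before
           evacuating.  An entry of 0x50000 would be evacuated directly.
        */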
#ifdef ENABLE_WIN32_DLL_SUPPORT
        if ( (unsigned long)(*srt) & 0x1 ) {
            evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
        } else {
            evacuate(*srt);
        }
#else
        evacuate(*srt);
#endif
    }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    // chase the link field for any TSOs on the same queue
    (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
#if defined(PAR)
        || tso->why_blocked == BlockedOnGA
        || tso->why_blocked == BlockedOnGA_NoSend
#endif
        ) {
        tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    if ( tso->blocked_exceptions != NULL ) {
        tso->blocked_exceptions =
            (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }
    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
    StgPtr p, q;
    StgInfoTable *info;
    bdescr *bd;
    nat saved_evac_gen = evac_gen;

    p = stp->scan;
    bd = stp->scan_bd;

    failed_to_evac = rtsFalse;

    /* scavenge phase - standard breadth-first scavenging of the
     * evacuated objects
     */
    while (bd != stp->hp_bd || p < stp->hp) {

        // If we're at the end of this block, move on to the next block
        if (bd != stp->hp_bd && p == bd->free) {
            bd = bd->link;
            p = bd->start;
            continue;
        }

        info = get_itbl((StgClosure *)p);
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

        ASSERT(thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR:
            /* treat MVars specially, because we don't want to evacuate the
             * mut_link field in the middle of the closure.
             */
        {
            StgMVar *mvar = ((StgMVar *)p);
            evac_gen = 0;
            (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
            (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
            (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
            evac_gen = saved_evac_gen;
            recordMutable((StgMutClosure *)mvar);
            failed_to_evac = rtsFalse; // mutable.
            p += sizeofW(StgMVar);
            break;
        }
        case THUNK_2_0:
        case FUN_2_0:
            scavenge_srt(info);
        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_srt(info);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
            break;

        case FUN_1_0:
            scavenge_srt(info);
        case CONSTR_1_0:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_srt(info);
            p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
            break;

        case FUN_0_1:
            scavenge_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
        case FUN_0_2:
            scavenge_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
        case FUN_1_1:
            scavenge_srt(info);
        case CONSTR_1_1:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;
        case FUN:
        case THUNK:
            scavenge_srt(info);
            // fall through

        case CONSTR:
        case WEAK:
        case FOREIGN:
        case STABLE_NAME:
        case BCO:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p += info->layout.payload.nptrs;
            break;
        }
        case IND_PERM:
            if (stp->gen->no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                //
                // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
                //
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
                // @LDV profiling
                // We pretend that p has just been created.
                LDV_recordCreate((StgClosure *)p);
#endif
            }
            // fall through
        case IND_OLDGEN_PERM:
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordOldToNewPtrs((StgMutClosure *)p);
            }
            p += sizeofW(StgIndOldGen);
            break;
        case MUT_VAR:
            evac_gen = 0;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            evac_gen = saved_evac_gen;
            recordMutable((StgMutClosure *)p);
            failed_to_evac = rtsFalse; // mutable anyhow
            p += sizeofW(StgMutVar);
            break;

        case MUT_CONS:
            failed_to_evac = rtsFalse; // mutable anyhow
            p += sizeofW(StgMutVar);
            break;
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case BLACKHOLE_BQ:
        {
            StgBlockingQueue *bh = (StgBlockingQueue *)p;
            (StgClosure *)bh->blocking_queue =
                evacuate((StgClosure *)bh->blocking_queue);
            recordMutable((StgMutClosure *)bh);
            failed_to_evac = rtsFalse;
            p += BLACKHOLE_sizeW();
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }
        case AP_UPD: // same as PAPs
        case PAP:
            /* Treat a PAP just like a section of stack, not forgetting to
             * evacuate the function pointer too...
             */
        {
            StgPAP* pap = (StgPAP *)p;

            pap->fun = evacuate(pap->fun);
            scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
            p += pap_sizeW(pap);
            break;
        }
        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS:
            // follow everything
        {
            StgPtr next;

            evac_gen = 0; // repeatedly mutable
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            evac_gen = saved_evac_gen;
            recordMutable((StgMutClosure *)q);
            failed_to_evac = rtsFalse; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            // it's tempting to recordMutable() if failed_to_evac is
            // false, but that breaks some assumptions (eg. every
            // closure on the mutable list is supposed to have the MUT
            // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            evac_gen = 0;
            scavengeTSO(tso);
            evac_gen = saved_evac_gen;
            recordMutable((StgMutClosure *)tso);
            failed_to_evac = rtsFalse; // mutable anyhow.
            p += tso_sizeW(tso);
            break;
        }
2434 case RBH: // cf. BLACKHOLE_BQ
2437 nat size, ptrs, nonptrs, vhs;
2439 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2441 StgRBH *rbh = (StgRBH *)p;
2442 (StgClosure *)rbh->blocking_queue =
2443 evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
	      p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();

	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
	      bf, info_type((StgClosure *)bf),
	      bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);

	p += sizeofW(StgFetchMe);
	break;	// nothing to do in this case

    case FETCH_ME_BQ:	// cf. BLACKHOLE_BQ
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	belch("@@ scavenge: %p (%s) exciting, isn't it",
	      p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);

	barf("scavenge: unimplemented/strange closure type %d @ %p",

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);

/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
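/* Illustrative sketch (not part of the original source): the overall shape
 * of the mark-stack driver below, assuming the mark_stack_empty() and
 * pop_mark_stack() primitives that the real loop uses.  Disabled with #if 0.
 */
#if 0
static void
mark_stack_driver_sketch(void)
{
    StgPtr p;
    while (!mark_stack_empty()) {
	p = pop_mark_stack();
	// switch on get_itbl((StgClosure *)p)->type and evacuate each
	// pointer field in place, exactly as the real loop below does;
	// children evacuated into the oldest generation are pushed onto
	// the mark stack in turn by evacuate()
    }
}
#endif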
scavenge_mark_stack(void)
    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	switch (info->type) {

	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	    StgMVar *mvar = ((StgMVar *)p);
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable.

	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);

	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);

	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.

	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    failed_to_evac = rtsFalse;

	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;

	    failed_to_evac = rtsFalse;

	case SE_CAF_BLACKHOLE:

	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;

	case THUNK_SELECTOR:
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);

	case AP_UPD:	// same as PAPs
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	    StgPAP* pap = (StgPAP *)p;
	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);

	    // follow everything
	    evac_gen = 0;		// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable anyhow.

	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);

	    StgTSO *tso = (StgTSO *)p;
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;

	case RBH:	// cf. BLACKHOLE_BQ
	    nat size, ptrs, nonptrs, vhs;
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    recordMutable((StgMutClosure *)rbh);
	    failed_to_evac = rtsFalse;	// mutable anyhow.
	    belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		  p, info_type(p), (StgClosure *)rbh->blocking_queue));

	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		  bf, info_type((StgClosure *)bf),
		  bf->node, info_type(bf->node)));

	    break;	// nothing to do in this case

	case FETCH_ME_BQ:	// cf. BLACKHOLE_BQ
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    belch("@@ scavenge: %p (%s) exciting, isn't it",
		  p, info_type((StgClosure *)p)));

	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);

	// mark the next bit to indicate "scavenged"
	mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
	IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
	mark_stack_overflowed = rtsFalse;
	oldgen_scan_bd = oldest_gen->steps[0].blocks;
	oldgen_scan = oldgen_scan_bd->start;

    if (oldgen_scan_bd) {
	// push a new thing on the mark stack
	// find a closure that is marked but not scavenged, and start
	while (oldgen_scan < oldgen_scan_bd->free
	       && !is_marked(oldgen_scan,oldgen_scan_bd)) {

	if (oldgen_scan < oldgen_scan_bd->free) {
	    // already scavenged?
	    if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
		oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
	    push_mark_stack(oldgen_scan);
	    // ToDo: bump the linear scan by the actual size of the object
	    oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;

	oldgen_scan_bd = oldgen_scan_bd->link;
	if (oldgen_scan_bd != NULL) {
	    oldgen_scan = oldgen_scan_bd->start;

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
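/* Illustrative sketch (not part of the original source): how a caller
 * typically uses scavenge_one().  It returns rtsTrue when something the
 * object points at could not be promoted, i.e. the object must stay on a
 * mutable list.  Hypothetical helper, disabled with #if 0.
 */
#if 0
static void
rescan_mutable_object_sketch(StgClosure *c, generation *gen)
{
    if (scavenge_one((StgPtr)c)) {
	// still holds old-to-new pointers: keep it on gen's mutable list
	mkMutCons(c, gen);
    }
}
#endif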
scavenge_one(StgPtr p)
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case FUN_1_0:	// hardly worth specialising these guys
    case IND_OLDGEN_PERM:
	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);

    case SE_CAF_BLACKHOLE:

    case THUNK_SELECTOR:
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);

	// nothing to follow

	// follow everything
	evac_gen = 0;		// repeatedly mutable
	recordMutable((StgMutClosure *)p);
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;

    case MUT_ARR_PTRS_FROZEN:
	// follow everything
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);

	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;		// repeatedly mutable
	recordMutable((StgMutClosure *)tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;

	StgPAP* pap = (StgPAP *)p;
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);

	// This might happen if for instance a MUT_CONS was pointing to a
	// THUNK which has since been updated.  The IND_OLDGEN will
	// be on the mutable list anyway, so we don't need to do anything

	barf("scavenge_one: strange object %d", (int)(info->type));

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
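/* Illustrative sketch (not part of the original source): how the GC entry
 * point treats the mutable lists of generations older than N as roots.
 * Hypothetical driver loop, disabled with #if 0; the real driver lives in
 * GarbageCollect(), which also snapshots gen->mut_list into
 * gen->saved_mut_list before calling scavenge_mutable_list().
 */
#if 0
static void
scavenge_mut_list_roots_sketch(nat N)
{
    nat g;
    for (g = N + 1; g < RtsFlags.GcFlags.generations; g++) {
	scavenge_mut_once_list(&generations[g]);
	scavenge_mutable_list(&generations[g]);
    }
}
#endif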
scavenge_mut_once_list(generation *gen)
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;

    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {

	case IND_OLDGEN_PERM:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	    StgPtr start = gen->steps[0].scan;
	    bdescr *start_bd = gen->steps[0].scan_bd;
	    scavenge(&gen->steps[0]);
	    if (start_bd != gen->steps[0].scan_bd) {
		size += (P_)BLOCK_ROUND_UP(start) - start;
		start_bd = start_bd->link;
		while (start_bd != gen->steps[0].scan_bd) {
		    size += BLOCK_SIZE_W;
		    start_bd = start_bd->link;
		size += gen->steps[0].scan -
		    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		size = gen->steps[0].scan - start;
	    belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));

	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;

	    /* the mut_link field of an IND_STATIC is overloaded as the
	     * static link field too (it just so happens that we don't need
	     * both at the same time), so we need to NULL it out when
	     * removing this object from the mutable list because the static
	     * link fields are all assumed to be NULL before doing a major
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	    if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.
		p->mut_link = new_list;

	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));

    gen->mut_once_list = new_list;

scavenge_mutable_list(generation *gen)
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;

    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {

	    // follow everything
	    p->mut_link = gen->mut_list;
	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);

	// Happens if a MUT_ARR_PTRS in the old generation is frozen
	case MUT_ARR_PTRS_FROZEN:
	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		mkMutCons((StgClosure *)p, gen);

	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;

	    StgMVar *mvar = (StgMVar *)p;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    p->mut_link = gen->mut_list;

	    StgTSO *tso = (StgTSO *)p;
	    /* Don't take this TSO off the mutable list - it might still
	     * point to some younger objects (because we set evac_gen to 0
	    tso->mut_link = gen->mut_list;
	    gen->mut_list = (StgMutClosure *)tso;

	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    p->mut_link = gen->mut_list;

	    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
	case IND_OLDGEN_PERM:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = gen->mut_once_list;
		gen->mut_once_list = p;

	    // HWL: check whether all of these are necessary

	case RBH:	// cf. BLACKHOLE_BQ
	    // nat size, ptrs, nonptrs, vhs;
	    // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)rbh);
	    // ToDo: use size of reverted closure here!
	    p += BLACKHOLE_sizeW();

	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    p += sizeofW(StgBlockedFetch);

	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

	    p += sizeofW(StgFetchMe);
	    break;	// nothing to do in this case

	case FETCH_ME_BQ:	// cf. BLACKHOLE_BQ
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    p += sizeofW(StgFetchMeBlockingQueue);

	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));

scavenge_static(void)
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
    while (p != END_OF_STATIC_LIST) {

	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;

	switch (info -> type) {

	    StgInd *ind = (StgInd *)p;
	    ind->indirectee = evacuate(ind->indirectee);

	    /* might fail to evacuate it, in which case we have to pop it
	     * back on the mutable list (and take it off the
	     * scavenged_static list because the static link and mut link
	     * pointers are one and the same).
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		scavenged_static_objects = IND_STATIC_LINK(p);
		((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		oldest_gen->mut_once_list = (StgMutClosure *)ind;

	    next = (P_)p->payload + info->layout.payload.ptrs;
	    // evacuate the pointers
	    for (q = (P_)p->payload; q < next; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);

	    barf("scavenge_static: strange closure %d", (int)(info->type));

	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)

/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
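/* Illustrative note (not part of the original source): in the bitmap cases
 * below, a *clear* bit means "this stack word is a pointer, evacuate it",
 * and a set bit marks a non-pointer word.  For example, with the small
 * bitmap 0b0110, words 0 and 3 of the frame's payload are evacuated and
 * words 1 and 2 are skipped.  A minimal sketch of that walk, disabled:
 */
#if 0
static void
walk_small_bitmap_sketch(StgPtr p, StgWord bitmap)
{
    while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
    }
}
#endif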
scavenge_stack(StgPtr p, StgPtr stack_end)
    const StgInfoTable* info;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.

    while (p < stack_end) {

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {

	/* Is q a pointer to a closure?
	if (! LOOKS_LIKE_GHC_INFO(q) ) {

	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {	// Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    // otherwise, must be a pointer into the allocation space.
	    (StgClosure *)*p = evacuate((StgClosure *)q);

	 * Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	info = get_itbl((StgClosure *)p);

	switch (info->type) {

	    // Dynamic bitmap: the mask is stored on the stack
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p = (P_)&((StgRetDyn *)p)->payload[0];

	    // probably a slow-entry point return address:
	    belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		  old_p, p, old_p+1));
	    p++;	// what if FHS!=1 !? -- HWL

	    /* Specialised code for update frames, since they're so common.
	     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	    StgUpdateFrame *frame = (StgUpdateFrame *)p;
	    p += sizeofW(StgUpdateFrame);

	    frame->updatee = evacuate(frame->updatee);
#else // specialised code for update frames, not sure if it's worth it.
	    nat type = get_itbl(frame->updatee)->type;
	    if (type == EVACUATED) {
		frame->updatee = evacuate(frame->updatee);
		bdescr *bd = Bdescr((P_)frame->updatee);
		if (bd->gen_no > N) {
		    if (bd->gen_no < evac_gen) {
			failed_to_evac = rtsTrue;

		// Don't promote blackholes
		if (!(stp->gen_no == 0 &&
		      stp->no == stp->gen->n_steps-1)) {

		    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				  sizeofW(StgHeader), stp);
		    frame->updatee = to;
		    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
		    frame->updatee = to;
		    recordMutable((StgMutClosure *)to);
		    /* will never be SE_{,CAF_}BLACKHOLE, since we
		       don't push an update frame for single-entry thunks.  KSW 1999-01. */
		    barf("scavenge_stack: UPDATE_FRAME updatee");

	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	    bitmap = info->layout.bitmap;
	    // this assumes that the payload starts immediately after the info-ptr
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		bitmap = bitmap >> 1;

	    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	    StgLargeBitmap *large_bitmap;

	    large_bitmap = info->layout.large_bitmap;

	    for (i=0; i<large_bitmap->size; i++) {
		bitmap = large_bitmap->bitmap[i];
		q = p + BITS_IN(W_);
		while (bitmap != 0) {
		    if ((bitmap & 1) == 0) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
		    bitmap = bitmap >> 1;
		if (i+1 < large_bitmap->size) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);

	    // and don't forget to follow the SRT

	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));

/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  --------------------------------------------------------------------------- */
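/* Illustrative sketch (not part of the original source): scavenging is
 * iterated to a fixpoint, because evacuating one object can push new work
 * onto a step's lists.  Hypothetical loop for a single step, disabled with
 * #if 0; the real fixpoint loop lives in GarbageCollect().
 */
#if 0
static void
scavenge_large_until_done_sketch(step *stp)
{
    while (stp->new_large_objects != NULL) {
	// moves blocks from new_large_objects to scavenged_large_objects;
	// evacuation during the scavenge may add new entries, so loop
	scavenge_large(stp);
    }
}
#endif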
scavenge_large(step *stp)
    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	// update the block count in this step.
	stp->n_scavenged_large_blocks += bd->blocks;

	if (scavenge_one(p)) {
	    mkMutCons((StgClosure *)p, stp->gen);

/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */
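/* Illustrative note (not part of the original source): after a major
 * collection the marks on the scavenged static list are reset so the next
 * major GC starts from a clean slate.  A hypothetical call site, disabled:
 */
#if 0
    zero_static_object_list(scavenged_static_objects);
#endif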
zero_static_object_list(StgClosure* first_static)
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the

zero_mutable_list( StgMutClosure *first )
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
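/* Illustrative note (not part of the original source): the loop below
 * restores each CAF's saved info pointer, undoing the in-place update that
 * evaluation performed.  A hypothetical call site for the enclosing
 * function (assumed here to be called revertCAFs), disabled:
 */
#if 0
    revertCAFs();
#endif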
    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;

markCAFs( evac_fn evac )
    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
	evac(&c->indirectee);

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   -------------------------------------------------------------------------- */

#if 0 && defined(DEBUG)
    const StgInfoTable *info;

	ASSERT(info->type == IND_STATIC);

	if (STATIC_LINK(info,p) == NULL) {
	    IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
	    SET_INFO(p,&stg_BLACKHOLE_info);
	    p = STATIC_LINK2(info,p);
	    pp = &STATIC_LINK2(info,p);

    //	belch("%d CAFs live", i);
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
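/* Illustrative sketch (not part of the original source): the essence of
 * lazy black holing - walk the chain of update frames via tso->su and
 * overwrite each updatee with a BLACKHOLE, stopping at the first thunk
 * that is already black-holed.  Simplified version of the function below
 * (it ignores CATCH_FRAMEs and SEQ_FRAMEs), disabled with #if 0.
 */
#if 0
static void
lazy_blackhole_sketch(StgTSO *tso)
{
    StgUpdateFrame *uf;
    for (uf = tso->su; get_itbl(uf)->type == UPDATE_FRAME; uf = uf->link) {
	if (uf->updatee->header.info == &stg_BLACKHOLE_info) {
	    break;	// everything below this frame is already done
	}
	SET_INFO(uf->updatee, &stg_BLACKHOLE_info);
    }
}
#endif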
threadLazyBlackHole(StgTSO *tso)
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

	switch (get_itbl(update_frame)->type) {

	    update_frame = ((StgCatchFrame *)update_frame)->link;

	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	    if (bh->header.info == &stg_BLACKHOLE_info) {

	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		belch("Unexpected lazy BHing required at 0x%04x",(int)bh);

		// We pretend that bh is now dead.
		LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
		SET_INFO(bh,&stg_BLACKHOLE_info);
		// We pretend that bh has just been created.
		LDV_recordCreate(bh);

	    update_frame = update_frame->link;

	    update_frame = ((StgSeqFrame *)update_frame)->link;

	    barf("threadPaused");
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 * -------------------------------------------------------------------------- */
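/* Illustrative sketch (not part of the original source): the core squeeze
 * step.  When two update frames sit directly on top of each other they
 * will both update their updatees with the same value, so one frame can be
 * eliminated by turning its updatee into an indirection now.  Hypothetical
 * helper mirroring the code below, disabled with #if 0.
 */
#if 0
static void
squeeze_two_frames_sketch(StgUpdateFrame *keep, StgUpdateFrame *bypass)
{
    StgClosure *updatee_keep   = keep->updatee;
    StgClosure *updatee_bypass = bypass->updatee;

    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
	// point the bypassed thunk at the kept one; any threads blocked
	// on it are woken by the update
	UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
    }
}
#endif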
threadSqueezeStack(StgTSO *tso)
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame;		// Temporally next
    StgUpdateFrame *prev_frame;		// Temporally previous

    rtsBool prev_was_update_frame;

    StgUpdateFrame *top_frame;
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
    void printObj( StgClosure *obj );	// from Printer.c

    top_frame = tso->su;

    bottom = &(tso->stack[tso->stack_size]);

    /* There must be at least one frame, namely the STOP_FRAME.
    ASSERT((P_)frame < bottom);

    /* Walk down the stack, reversing the links between frames so that
     * we can walk back up as we squeeze from the bottom.  Note that
     * next_frame and prev_frame refer to next and previous as they were
     * added to the stack, rather than the way we see them in this
     * walk. (It makes the next loop less confusing.)
     *
     * Stop if we find an update frame pointing to a black hole
     * (see comment in threadLazyBlackHole()).

    // bottom - sizeof(StgStopFrame) is the STOP_FRAME
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
	prev_frame = frame->link;
	frame->link = next_frame;

	if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	    printObj((StgClosure *)prev_frame);
	    barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",

	switch (get_itbl(frame)->type) {
	    if (frame->updatee->header.info == &stg_BLACKHOLE_info)
	    barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	    printObj((StgClosure *)prev_frame);

    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &stg_BLACKHOLE_info) {

    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above.  We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze.  When you get to the topmost frame,
     * remember that there are still some words above it that might have

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.

    while (frame != NULL) {
	StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
	rtsBool is_update_frame;

	next_frame = frame->link;
	is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

	 * 1. both the previous and current frame are update frames
	 * 2. the current frame is empty
	if (prev_was_update_frame && is_update_frame &&
	    (P_)prev_frame == frame_bottom + displacement) {

	    // Now squeeze out the current frame
	    StgClosure *updatee_keep   = prev_frame->updatee;
	    StgClosure *updatee_bypass = frame->updatee;

	    IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));

	    /* Deal with blocking queues.  If both updatees have blocked
	     * threads, then we should merge the queues into the update
	     * frame that we're keeping.
	     *
	     * Alternatively, we could just wake them up: they'll just go
	     * straight to sleep on the proper blackhole!  This is less code
	     * and probably less bug prone, although it's probably much
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
	    if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
		|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
		// Sigh.  It has one.  Don't lose those threads!
		if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
		    // Urgh.  Two queues.  Merge them.
		    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

		    while (keep_tso->link != END_TSO_QUEUE) {
			keep_tso = keep_tso->link;
		    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

		    // For simplicity, just swap the BQ for the BH
		    P_ temp = updatee_keep;

		    updatee_keep = updatee_bypass;
		    updatee_bypass = temp;

		    // Record the swap in the kept frame (below)
		    prev_frame->updatee = updatee_keep;

	    TICK_UPD_SQUEEZED();
	    /* wasn't there something about update squeezing and ticky to be
	     * sorted out?  oh yes: we aren't counting each enter properly
	     * in this case.  See the log somewhere.  KSW 1999-04-21
	     *
	     * Check two things: that the two update frames don't point to
	     * the same object, and that the updatee_bypass isn't already an
	     * indirection.  Both of these cases only happen when we're in a
	     * black-hole-style loop (and there are multiple update frames
	     * on the stack pointing to the same closure), but they can both
	     * screw us up if we don't check.
	    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
		// this wakes the threads up
		UPD_IND_NOLOCK(updatee_bypass, updatee_keep);

	    sp = (P_)frame - 1;		// sp = stuff to slide
	    displacement += sizeofW(StgUpdateFrame);

	    // No squeeze for this frame
	    sp = frame_bottom - 1;	// Keep the current frame

	    /* Do lazy black-holing.
	    if (is_update_frame) {
		StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
		if (bh->header.info != &stg_BLACKHOLE_info &&
		    bh->header.info != &stg_BLACKHOLE_BQ_info &&
		    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		    belch("Unexpected lazy BHing required at 0x%04x",(int)bh);

		    /* zero out the slop so that the sanity checker can tell
		     * where the next closure is.
		    StgInfoTable *info = get_itbl(bh);
		    nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
		    /* don't zero out slop for a THUNK_SELECTOR, because its layout
		     * info is used for a different purpose, and it's exactly the
		     * same size as a BLACKHOLE in any case.
		    if (info->type != THUNK_SELECTOR) {
			for (i = np; i < np + nw; i++) {
			    ((StgClosure *)bh)->payload[i] = 0;

		    // We pretend that bh is now dead.
		    LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
		    // ToDo: maybe use SET_HDR() and remove LDV_recordCreate()?
		    SET_INFO(bh,&stg_BLACKHOLE_info);
		    // We pretend that bh has just been created.
		    LDV_recordCreate(bh);

	// Fix the link in the current frame (should point to the frame below)
	frame->link = prev_frame;
	prev_was_update_frame = is_update_frame;

	// Now slide all words from sp up to the next frame
	if (displacement > 0) {
	    P_ next_frame_bottom;

	    if (next_frame != NULL)
		next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
		next_frame_bottom = tso->sp - 1;

	    belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,

	    while (sp >= next_frame_bottom) {
		sp[displacement] = *sp;

	    (P_)prev_frame = (P_)frame + displacement;

    tso->sp += displacement;
    tso->su = prev_frame;

    belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
	  squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
threadPaused(StgTSO *tso)
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso);	// does black holing too
    else
	threadLazyBlackHole(tso);
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
printMutOnceList(generation *gen)
    StgMutClosure *p, *next;

    p = gen->mut_once_list;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    fputc('\n', stderr);

printMutableList(generation *gen)
    StgMutClosure *p, *next;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    fputc('\n', stderr);
static inline rtsBool
maybeLarge(StgClosure *closure)
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);