1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.109 2001/07/25 12:18:26 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
14 #include "StoragePriv.h"
#include "SchedAPI.h" // for RevertCAFs prototype
19 #include "BlockAlloc.h"
25 #include "StablePriv.h"
27 #include "ParTicky.h" // ToDo: move into Rts.h
28 #include "GCCompact.h"
29 #if defined(GRAN) || defined(PAR)
30 # include "GranSimRts.h"
31 # include "ParallelRts.h"
35 # include "ParallelDebug.h"
40 #if defined(RTS_GTK_FRONTPANEL)
41 #include "FrontPanel.h"
45 /* STATIC OBJECT LIST.
48 * We maintain a linked list of static objects that are still live.
49 * The requirements for this list are:
51 * - we need to scan the list while adding to it, in order to
52 * scavenge all the static objects (in the same way that
53 * breadth-first scavenging works for dynamic objects).
55 * - we need to be able to tell whether an object is already on
56 * the list, to break loops.
58 * Each static object has a "static link field", which we use for
59 * linking objects on to the list. We use a stack-type list, consing
60 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that shouldn't matter).
64 * A separate list is kept for objects that have been scavenged
65 * already - this is so that we can zero all the marks afterwards.
67 * An object is on the list if its static link field is non-zero; this
68 * means that we have to mark the end of the list with '1', not NULL.
70 * Extra notes for generational GC:
72 * Each generation has a static object list associated with it. When
73 * collecting generations up to N, we treat the static object lists
74 * from generations > N as roots.
76 * We build up a static object list while collecting generations 0..N,
77 * which is then appended to the static object list of generation N+1.
79 StgClosure* static_objects; // live static objects
80 StgClosure* scavenged_static_objects; // static objects scavenged so far
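/* An illustrative sketch (not live RTS code) of the consing discipline
 * described above: an object goes on the front of static_objects only
 * if its static link field is empty, and the list is terminated by the
 * non-NULL sentinel END_OF_STATIC_LIST (i.e. '1'), so "link == NULL"
 * reliably means "not on any list".
 */
#if 0
static void
example_add_static( StgClosure *c, const StgInfoTable *info )
{
    if (STATIC_LINK(info,c) == NULL) {	// not yet on the list
	STATIC_LINK(info,c) = static_objects;
	static_objects = c;		// cons onto the front
    }
}
#endif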
82 /* N is the oldest generation being collected, where the generations
83 * are numbered starting at 0. A major GC (indicated by the major_gc
84 * flag) is when we're collecting all generations. We only attempt to
85 * deal with static objects and GC CAFs when doing a major GC.
88 static rtsBool major_gc;
90 /* Youngest generation that objects should be evacuated to in
91 * evacuate(). (Logically an argument to evacuate, but it's static
92 * a lot of the time so we optimise it into a global variable).
98 StgWeak *old_weak_ptr_list; // also pending finaliser list
99 static rtsBool weak_done; // all done for this pass
101 /* List of all threads during GC
103 static StgTSO *old_all_threads;
104 static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
109 static rtsBool failed_to_evac;
111 /* Old to-space (used for two-space collector only)
113 bdescr *old_to_blocks;
115 /* Data used for allocation area sizing.
117 lnat new_blocks; // blocks allocated during this GC
118 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
120 /* Used to avoid long recursion due to selector thunks
122 lnat thunk_selector_depth = 0;
123 #define MAX_THUNK_SELECTOR_DEPTH 256
125 /* -----------------------------------------------------------------------------
126 Static function declarations
127 -------------------------------------------------------------------------- */
129 static void mark_root ( StgClosure **root );
130 static StgClosure * evacuate ( StgClosure *q );
131 static void zero_static_object_list ( StgClosure* first_static );
132 static void zero_mutable_list ( StgMutClosure *first );
134 static rtsBool traverse_weak_ptr_list ( void );
135 static void cleanup_weak_ptr_list ( StgWeak **list );
137 static void scavenge ( step * );
138 static void scavenge_mark_stack ( void );
139 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
140 static rtsBool scavenge_one ( StgClosure *p );
141 static void scavenge_large ( step * );
142 static void scavenge_static ( void );
143 static void scavenge_mutable_list ( generation *g );
144 static void scavenge_mut_once_list ( generation *g );
145 static void scavengeCAFs ( void );
147 #if 0 && defined(DEBUG)
148 static void gcCAFs ( void );
151 /* -----------------------------------------------------------------------------
152 inline functions etc. for dealing with the mark bitmap & stack.
153 -------------------------------------------------------------------------- */
155 #define MARK_STACK_BLOCKS 4
157 static bdescr *mark_stack_bdescr;
158 static StgPtr *mark_stack;
159 static StgPtr *mark_sp;
160 static StgPtr *mark_splim;
162 static inline rtsBool
163 mark_stack_empty(void)
165 return mark_sp == mark_stack;
168 static inline rtsBool
169 mark_stack_full(void)
171 return mark_sp >= mark_splim;
static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
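// pop_mark_stack (used by scavenge_mark_stack() below) is the inverse
// operation; a minimal sketch consistent with the declarations above:
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}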
186 /* -----------------------------------------------------------------------------
189 For garbage collecting generation N (and all younger generations):
191 - follow all pointers in the root set. the root set includes all
192 mutable objects in all steps in all generations.
194 - for each pointer, evacuate the object it points to into either
195 + to-space in the next higher step in that generation, if one exists,
196 + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current generation.
199 + if the object's generation < N, then evacuate it to to-space
200 in the next generation.
202 - repeatedly scavenge to-space from each step in each generation
203 being collected until no more objects can be evacuated.
205 - free from-space in each step, and set from-space = to-space.
207 -------------------------------------------------------------------------- */
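/* An illustrative sketch of the destination rule above (it ignores the
 * complications handled by the real evacuate() below: large objects,
 * compacted steps, and eager promotion via evac_gen):
 */
#if 0
static step *
example_evac_dest( step *src )
{
    if (src->no+1 < generations[src->gen_no].n_steps) {
	// to-space of the next higher step in the same generation
	return &generations[src->gen_no].steps[src->no+1];
    } else if (src->gen_no < RtsFlags.GcFlags.generations-1) {
	// last step: promote to the first step of the next generation
	return &generations[src->gen_no+1].steps[0];
    } else {
	// oldest step of the oldest generation: its own to-space
	return src;
    }
}
#endif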
210 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
214 lnat live, allocated, collected = 0, copied = 0;
215 lnat oldgen_saved_blocks = 0;
219 CostCentreStack *prev_CCS;
222 #if defined(DEBUG) && defined(GRAN)
223 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
227 // tell the stats department that we've started a GC
230 // Init stats and print par specific (timing) info
231 PAR_TICKY_PAR_START();
233 // attribute any costs to CCS_GC
239 /* Approximate how much we allocated.
240 * Todo: only when generating stats?
242 allocated = calcAllocated();
244 /* Figure out which generation to collect
246 if (force_major_gc) {
247 N = RtsFlags.GcFlags.generations - 1;
251 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
252 if (generations[g].steps[0].n_blocks +
253 generations[g].steps[0].n_large_blocks
254 >= generations[g].max_blocks) {
258 major_gc = (N == RtsFlags.GcFlags.generations-1);
261 #ifdef RTS_GTK_FRONTPANEL
262 if (RtsFlags.GcFlags.frontpanel) {
263 updateFrontPanelBeforeGC(N);
267 // check stack sanity *before* GC (ToDo: check all threads)
269 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
271 IF_DEBUG(sanity, checkFreeListSanity());
273 /* Initialise the static object lists
275 static_objects = END_OF_STATIC_LIST;
276 scavenged_static_objects = END_OF_STATIC_LIST;
278 /* zero the mutable list for the oldest generation (see comment by
279 * zero_mutable_list below).
282 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
285 /* Save the old to-space if we're doing a two-space collection
287 if (RtsFlags.GcFlags.generations == 1) {
288 old_to_blocks = g0s0->to_blocks;
289 g0s0->to_blocks = NULL;
292 /* Keep a count of how many new blocks we allocated during this GC
293 * (used for resizing the allocation area, later).
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
300 for (g = 0; g <= N; g++) {
301 generations[g].mut_once_list = END_MUT_LIST;
302 generations[g].mut_list = END_MUT_LIST;
304 for (s = 0; s < generations[g].n_steps; s++) {
306 // generation 0, step 0 doesn't need to-space
307 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
315 stp = &generations[g].steps[s];
316 ASSERT(stp->gen_no == g);
317 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
321 bd->flags = BF_EVACUATED; // it's a to-space block
323 stp->hpLim = stp->hp + BLOCK_SIZE_W;
326 stp->n_to_blocks = 1;
327 stp->scan = bd->start;
329 stp->new_large_objects = NULL;
330 stp->scavenged_large_objects = NULL;
331 stp->n_scavenged_large_blocks = 0;
333 // mark the large objects as not evacuated yet
334 for (bd = stp->large_objects; bd; bd = bd->link) {
335 bd->flags = BF_LARGE;
338 // for a compacted step, we need to allocate the bitmap
339 if (stp->is_compacted) {
340 nat bitmap_size; // in bytes
341 bdescr *bitmap_bdescr;
344 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
346 if (bitmap_size > 0) {
347 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
349 stp->bitmap = bitmap_bdescr;
350 bitmap = bitmap_bdescr->start;
352 IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
353 bitmap_size, bitmap););
355 // don't forget to fill it with zeros!
356 memset(bitmap, 0, bitmap_size);
	  // for each block in this step, point to its bitmap from the
	  // block descriptor
360 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
361 bd->u.bitmap = bitmap;
362 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
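	      // Worked example (assuming 4k blocks and 32-bit words):
	      // a block holds 1024 words and so needs 1024 mark bits =
	      // 128 bytes of bitmap, i.e. the bitmap pointer advances by
	      // BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE) = 1024/32 = 32
	      // words per block.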
369 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
372 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
373 for (s = 0; s < generations[g].n_steps; s++) {
374 stp = &generations[g].steps[s];
375 if (stp->hp_bd == NULL) {
376 ASSERT(stp->blocks == NULL);
381 bd->flags = 0; // *not* a to-space block or a large object
383 stp->hpLim = stp->hp + BLOCK_SIZE_W;
389 /* Set the scan pointer for older generations: remember we
390 * still have to scavenge objects that have been promoted. */
392 stp->scan_bd = stp->hp_bd;
393 stp->to_blocks = NULL;
394 stp->n_to_blocks = 0;
395 stp->new_large_objects = NULL;
396 stp->scavenged_large_objects = NULL;
397 stp->n_scavenged_large_blocks = 0;
401 /* Allocate a mark stack if we're doing a major collection.
404 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
405 mark_stack = (StgPtr *)mark_stack_bdescr->start;
406 mark_sp = mark_stack;
407 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
409 mark_stack_bdescr = NULL;
412 /* -----------------------------------------------------------------------
413 * follow all the roots that we know about:
414 * - mutable lists from each generation > N
415 * we want to *scavenge* these roots, not evacuate them: they're not
416 * going to move in this GC.
417 * Also: do them in reverse generation order. This is because we
418 * often want to promote objects that are pointed to by older
419 * generations early, so we don't have to repeatedly copy them.
420 * Doing the generations in reverse order ensures that we don't end
421 * up in the situation where we want to evac an object to gen 3 and
422 * it has already been evaced to gen 2.
426 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
427 generations[g].saved_mut_list = generations[g].mut_list;
428 generations[g].mut_list = END_MUT_LIST;
431 // Do the mut-once lists first
432 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
433 IF_PAR_DEBUG(verbose,
434 printMutOnceList(&generations[g]));
435 scavenge_mut_once_list(&generations[g]);
437 for (st = generations[g].n_steps-1; st >= 0; st--) {
438 scavenge(&generations[g].steps[st]);
442 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
443 IF_PAR_DEBUG(verbose,
444 printMutableList(&generations[g]));
445 scavenge_mutable_list(&generations[g]);
447 for (st = generations[g].n_steps-1; st >= 0; st--) {
448 scavenge(&generations[g].steps[st]);
455 /* follow all the roots that the application knows about.
458 get_roots(mark_root);
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell!
   */
463 /* Not needed in a seq version?
465 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
469 // Mark the entries in the GALA table of the parallel system
470 markLocalGAs(major_gc);
471 // Mark all entries on the list of pending fetches
472 markPendingFetches(major_gc);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
478 old_weak_ptr_list = weak_ptr_list;
479 weak_ptr_list = NULL;
480 weak_done = rtsFalse;
482 /* The all_threads list is like the weak_ptr_list.
483 * See traverse_weak_ptr_list() for the details.
485 old_all_threads = all_threads;
486 all_threads = END_TSO_QUEUE;
487 resurrected_threads = END_TSO_QUEUE;
489 /* Mark the stable pointer table.
491 markStablePtrTable(mark_root);
495 /* ToDo: To fix the caf leak, we need to make the commented out
496 * parts of this code do something sensible - as described in
499 extern void markHugsObjects(void);
504 /* -------------------------------------------------------------------------
505 * Repeatedly scavenge all the areas we know about until there's no
506 * more scavenging to be done.
513 // scavenge static objects
514 if (major_gc && static_objects != END_OF_STATIC_LIST) {
515 IF_DEBUG(sanity, checkStaticObjects(static_objects));
519 /* When scavenging the older generations: Objects may have been
520 * evacuated from generations <= N into older generations, and we
521 * need to scavenge these objects. We're going to try to ensure that
522 * any evacuations that occur move the objects into at least the
523 * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
528 // scavenge each step in generations 0..maxgen
534 // scavenge objects in compacted generation
535 if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
536 scavenge_mark_stack();
540 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
541 for (st = generations[gen].n_steps; --st >= 0; ) {
542 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
545 stp = &generations[gen].steps[st];
547 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
552 if (stp->new_large_objects != NULL) {
561 if (flag) { goto loop; }
564 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
569 /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
572 cleanup_weak_ptr_list(&weak_ptr_list);
575 // Reconstruct the Global Address tables used in GUM
576 rebuildGAtables(major_gc);
577 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
580 // Now see which stable names are still alive.
583 // Tidy the end of the to-space chains
584 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
585 for (s = 0; s < generations[g].n_steps; s++) {
586 stp = &generations[g].steps[s];
587 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
588 stp->hp_bd->free = stp->hp;
589 stp->hp_bd->link = NULL;
594 // NO MORE EVACUATION AFTER THIS POINT!
595 // Finally: compaction of the oldest generation.
596 if (major_gc && RtsFlags.GcFlags.compact) {
597 // save number of blocks for stats
598 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
602 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
604 /* run through all the generations/steps and tidy up
606 copied = new_blocks * BLOCK_SIZE_W;
607 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
610 generations[g].collections++; // for stats
613 for (s = 0; s < generations[g].n_steps; s++) {
615 stp = &generations[g].steps[s];
617 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
618 // stats information: how much we copied
620 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
625 // for generations we collected...
628 // rough calculation of garbage collected, for stats output
629 if (stp->is_compacted) {
630 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
632 collected += stp->n_blocks * BLOCK_SIZE_W;
635 /* free old memory and shift to-space into from-space for all
636 * the collected steps (except the allocation area). These
	 * freed blocks will probably be quickly recycled.
639 if (!(g == 0 && s == 0)) {
640 if (stp->is_compacted) {
641 // for a compacted step, just shift the new to-space
642 // onto the front of the now-compacted existing blocks.
643 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
644 bd->flags &= ~BF_EVACUATED; // now from-space
646 // tack the new blocks on the end of the existing blocks
647 if (stp->blocks == NULL) {
648 stp->blocks = stp->to_blocks;
650 for (bd = stp->blocks; bd != NULL; bd = next) {
653 bd->link = stp->to_blocks;
657 // add the new blocks to the block tally
658 stp->n_blocks += stp->n_to_blocks;
660 freeChain(stp->blocks);
661 stp->blocks = stp->to_blocks;
662 stp->n_blocks = stp->n_to_blocks;
663 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
664 bd->flags &= ~BF_EVACUATED; // now from-space
667 stp->to_blocks = NULL;
668 stp->n_to_blocks = 0;
671 /* LARGE OBJECTS. The current live large objects are chained on
	 * scavenged_large_objects, having been moved during garbage
673 * collection from large_objects. Any objects left on
	 * the large_objects list are therefore dead, so we free them here.
676 for (bd = stp->large_objects; bd != NULL; bd = next) {
682 // update the count of blocks used by large objects
683 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
684 bd->flags &= ~BF_EVACUATED;
686 stp->large_objects = stp->scavenged_large_objects;
687 stp->n_large_blocks = stp->n_scavenged_large_blocks;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *   max_blocks = (oldgen_max_blocks * G) / (n_generations - 1)
	 */
699 generations[g].max_blocks = (oldest_gen->max_blocks * g)
700 / (RtsFlags.GcFlags.generations-1);
702 generations[g].max_blocks = oldest_gen->max_blocks;
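	  // Worked example (illustrative): with 5 generations and
	  // oldest_gen->max_blocks = 1000, generations 1..3 get maxima of
	  // 250, 500 and 750 blocks, and the oldest keeps 1000.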
705 // for older generations...
708 /* For older generations, we need to append the
709 * scavenged_large_object list (i.e. large objects that have been
710 * promoted during this GC) to the large_object list for that step.
712 for (bd = stp->scavenged_large_objects; bd; bd = next) {
714 bd->flags &= ~BF_EVACUATED;
715 dbl_link_onto(bd, &stp->large_objects);
718 // add the new blocks we promoted during this GC
719 stp->n_blocks += stp->n_to_blocks;
720 stp->n_large_blocks += stp->n_scavenged_large_blocks;
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap size.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
733 if (major_gc && RtsFlags.GcFlags.generations > 1) {
734 oldest_gen->max_blocks =
735 stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
736 RtsFlags.GcFlags.minOldGenSize);
737 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
738 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
739 if (((int)oldest_gen->max_blocks -
740 (int)oldest_gen->steps[0].n_blocks) <
741 (RtsFlags.GcFlags.pcFreeHeap *
742 RtsFlags.GcFlags.maxHeapSize / 200)) {
748 // Guess the amount of live data for stats.
751 /* Free the small objects allocated via allocate(), since this will
752 * all have been copied into G0S1 now.
754 if (small_alloc_list != NULL) {
755 freeChain(small_alloc_list);
757 small_alloc_list = NULL;
761 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
763 /* Free the mark stack.
765 if (mark_stack_bdescr != NULL) {
766 freeGroup(mark_stack_bdescr);
771 for (g = 0; g <= N; g++) {
772 for (s = 0; s < generations[g].n_steps; s++) {
773 stp = &generations[g].steps[s];
774 if (stp->is_compacted && stp->bitmap != NULL) {
775 freeGroup(stp->bitmap);
780 /* Two-space collector:
781 * Free the old to-space, and estimate the amount of live data.
783 if (RtsFlags.GcFlags.generations == 1) {
786 if (old_to_blocks != NULL) {
787 freeChain(old_to_blocks);
789 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
790 bd->flags = 0; // now from-space
793 /* For a two-space collector, we need to resize the nursery. */
795 /* set up a new nursery. Allocate a nursery size based on a
796 * function of the amount of live data (currently a factor of 2,
797 * should be configurable (ToDo)). Use the blocks from the old
798 * nursery if possible, freeing up any left over blocks.
800 * If we get near the maximum heap size, then adjust our nursery
801 * size accordingly. If the nursery is the same size as the live
802 * data (L), then we need 3L bytes. We can reduce the size of the
803 * nursery to bring the required memory down near 2L bytes.
805 * A normal 2-space collector would need 4L bytes to give the same
806 * performance we get from 3L bytes, reducing to the same
807 * performance at 2L bytes.
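   *
   * Worked example (illustrative): with L = 100 blocks of live data and
   * a nursery of L blocks, a collection peaks at roughly nursery +
   * from-space + to-space = 3L = 300 blocks; shrinking the nursery
   * towards zero brings the peak down towards 2L (from-space plus
   * to-space alone).
   */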
809 blocks = g0s0->n_to_blocks;
811 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
812 RtsFlags.GcFlags.maxHeapSize ) {
813 long adjusted_blocks; // signed on purpose
816 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
817 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
818 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
819 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
822 blocks = adjusted_blocks;
825 blocks *= RtsFlags.GcFlags.oldGenFactor;
826 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
827 blocks = RtsFlags.GcFlags.minAllocAreaSize;
830 resizeNursery(blocks);
833 /* Generational collector:
834 * If the user has given us a suggested heap size, adjust our
835 * allocation area to make best use of the memory available.
838 if (RtsFlags.GcFlags.heapSizeSuggestion) {
840 nat needed = calcNeeded(); // approx blocks needed at next GC
842 /* Guess how much will be live in generation 0 step 0 next time.
843 * A good approximation is obtained by finding the
844 * percentage of g0s0 that was live at the last minor GC.
847 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
850 /* Estimate a size for the allocation area based on the
851 * information available. We might end up going slightly under
     * or over the suggested heap size, but we should be pretty close.
     * Formula:  (suggested - needed) / (1 + g0s0_pcnt_kept/100)
     *
859 * where 'needed' is the amount of memory needed at the next
860 * collection for collecting all steps except g0s0.
863 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
864 (100 + (long)g0s0_pcnt_kept);
866 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
867 blocks = RtsFlags.GcFlags.minAllocAreaSize;
870 resizeNursery((nat)blocks);
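    // Worked example (illustrative): suggested = 1000 blocks, needed = 200
    // and g0s0_pcnt_kept = 30 give blocks = (1000-200)*100/130 ~= 615;
    // at the next GC we then expect about 615 + 0.30*615 + 200 ~= 1000
    // blocks in use, which matches the suggestion.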
874 // mark the garbage collected CAFs as dead
875 #if 0 && defined(DEBUG) // doesn't work at the moment
876 if (major_gc) { gcCAFs(); }
879 // zero the scavenged static object list
881 zero_static_object_list(scavenged_static_objects);
888 // start any pending finalizers
889 scheduleFinalizers(old_weak_ptr_list);
891 // send exceptions to any threads which were about to die
892 resurrectThreads(resurrected_threads);
894 // Update the stable pointer hash table.
895 updateStablePtrTable(major_gc);
897 // check sanity after GC
898 IF_DEBUG(sanity, checkSanity());
900 // extra GC trace info
901 IF_DEBUG(gc, statDescribeGens());
904 // symbol-table based profiling
905 /* heapCensus(to_blocks); */ /* ToDo */
908 // restore enclosing cost centre
914 // check for memory leaks if sanity checking is on
915 IF_DEBUG(sanity, memInventory());
917 #ifdef RTS_GTK_FRONTPANEL
918 if (RtsFlags.GcFlags.frontpanel) {
919 updateFrontPanelAfterGC( N, live );
923 // ok, GC over: tell the stats department what happened.
924 stat_endGC(allocated, collected, live, copied, N);
930 /* -----------------------------------------------------------------------------
933 traverse_weak_ptr_list is called possibly many times during garbage
934 collection. It returns a flag indicating whether it did any work
935 (i.e. called evacuate on any live pointers).
937 Invariant: traverse_weak_ptr_list is called when the heap is in an
938 idempotent state. That means that there are no pending
939 evacuate/scavenge operations. This invariant helps the weak
940 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
944 For generational GC: we just don't try to finalize weak pointers in
945 older generations than the one we're collecting. This could
946 probably be optimised by keeping per-generation lists of weak
947 pointers, but for a few weak pointers this scheme will work.
948 -------------------------------------------------------------------------- */
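/* The per-weak-pointer decision, distilled (illustrative only; the real
 * loop below also copes with EVACUATED forwarding pointers and DEAD_WEAKs):
 *
 *	if (isAlive(w->key)) {		// key reachable: keep the weak ptr
 *	    w->value     = evacuate(w->value);
 *	    w->finalizer = evacuate(w->finalizer);
 *	    // move w onto the new weak_ptr_list
 *	} else {
 *	    // leave w on old_weak_ptr_list; once a whole pass makes no
 *	    // progress, everything still there is dead and its finalizer
 *	    // becomes pending
 *	}
 */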
951 traverse_weak_ptr_list(void)
953 StgWeak *w, **last_w, *next_w;
955 rtsBool flag = rtsFalse;
957 if (weak_done) { return rtsFalse; }
959 /* doesn't matter where we evacuate values/finalizers to, since
960 * these pointers are treated as roots (iff the keys are alive).
964 last_w = &old_weak_ptr_list;
965 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
967 /* First, this weak pointer might have been evacuated. If so,
968 * remove the forwarding pointer from the weak_ptr_list.
970 if (get_itbl(w)->type == EVACUATED) {
971 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
975 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
976 * called on a live weak pointer object. Just remove it.
978 if (w->header.info == &stg_DEAD_WEAK_info) {
979 next_w = ((StgDeadWeak *)w)->link;
984 ASSERT(get_itbl(w)->type == WEAK);
986 /* Now, check whether the key is reachable.
988 if ((new = isAlive(w->key))) {
990 // evacuate the value and finalizer
991 w->value = evacuate(w->value);
992 w->finalizer = evacuate(w->finalizer);
993 // remove this weak ptr from the old_weak_ptr list
995 // and put it on the new weak ptr list
997 w->link = weak_ptr_list;
1000 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
1004 last_w = &(w->link);
1010 /* Now deal with the all_threads list, which behaves somewhat like
1011 * the weak ptr list. If we discover any threads that are about to
1012 * become garbage, we wake them up and administer an exception.
1015 StgTSO *t, *tmp, *next, **prev;
1017 prev = &old_all_threads;
1018 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1020 (StgClosure *)tmp = isAlive((StgClosure *)t);
1026 ASSERT(get_itbl(t)->type == TSO);
1027 switch (t->what_next) {
1028 case ThreadRelocated:
1033 case ThreadComplete:
	// finished or died.  The thread might still be alive, but we
1035 // don't keep it on the all_threads list. Don't forget to
1036 // stub out its global_link field.
1037 next = t->global_link;
1038 t->global_link = END_TSO_QUEUE;
1046 // not alive (yet): leave this thread on the old_all_threads list.
1047 prev = &(t->global_link);
1048 next = t->global_link;
1052 // alive: move this thread onto the all_threads list.
1053 next = t->global_link;
1054 t->global_link = all_threads;
1062 /* If we didn't make any changes, then we can go round and kill all
1063 * the dead weak pointers. The old_weak_ptr list is used as a list
1064 * of pending finalizers later on.
1066 if (flag == rtsFalse) {
1067 cleanup_weak_ptr_list(&old_weak_ptr_list);
1068 for (w = old_weak_ptr_list; w; w = w->link) {
1069 w->finalizer = evacuate(w->finalizer);
1072 /* And resurrect any threads which were about to become garbage.
1075 StgTSO *t, *tmp, *next;
1076 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1077 next = t->global_link;
1078 (StgClosure *)tmp = evacuate((StgClosure *)t);
1079 tmp->global_link = resurrected_threads;
1080 resurrected_threads = tmp;
1084 weak_done = rtsTrue;
1090 /* -----------------------------------------------------------------------------
1091 After GC, the live weak pointer list may have forwarding pointers
1092 on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.
1096 Also, we don't consider weak pointer objects to be reachable, but
1097 we must nevertheless consider them to be "live" and retain them.
1098 Therefore any weak pointer objects which haven't as yet been
1099 evacuated need to be evacuated now.
1100 -------------------------------------------------------------------------- */
1104 cleanup_weak_ptr_list ( StgWeak **list )
1106 StgWeak *w, **last_w;
1109 for (w = *list; w; w = w->link) {
1111 if (get_itbl(w)->type == EVACUATED) {
1112 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
1116 if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
1117 (StgClosure *)w = evacuate((StgClosure *)w);
1120 last_w = &(w->link);
1124 /* -----------------------------------------------------------------------------
1125 isAlive determines whether the given closure is still alive (after
1126 a garbage collection) or not. It returns the new address of the
1127 closure if it is alive, or NULL otherwise.
1129 NOTE: Use it before compaction only!
1130 -------------------------------------------------------------------------- */
1134 isAlive(StgClosure *p)
1136 const StgInfoTable *info;
1143 /* ToDo: for static closures, check the static link field.
1144 * Problem here is that we sometimes don't set the link field, eg.
1145 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1150 // ignore closures in generations that we're not collecting.
1151 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1154 // large objects have an evacuated flag
1155 if (bd->flags & BF_LARGE) {
1156 if (bd->flags & BF_EVACUATED) {
1162 // check the mark bit for compacted steps
1163 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1167 switch (info->type) {
1172 case IND_OLDGEN: // rely on compatible layout with StgInd
1173 case IND_OLDGEN_PERM:
1174 // follow indirections
1175 p = ((StgInd *)p)->indirectee;
1180 return ((StgEvacuated *)p)->evacuee;
1183 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1184 p = (StgClosure *)((StgTSO *)p)->link;
1196 mark_root(StgClosure **root)
1198 *root = evacuate(*root);
1204 bdescr *bd = allocBlock();
1205 bd->gen_no = stp->gen_no;
1208 if (stp->gen_no <= N) {
1209 bd->flags = BF_EVACUATED;
1214 stp->hp_bd->free = stp->hp;
1215 stp->hp_bd->link = bd;
1216 stp->hp = bd->start;
1217 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1224 static __inline__ void
1225 upd_evacuee(StgClosure *p, StgClosure *dest)
1227 p->header.info = &stg_EVACUATED_info;
1228 ((StgEvacuated *)p)->evacuee = dest;
1232 static __inline__ StgClosure *
1233 copy(StgClosure *src, nat size, step *stp)
1237 TICK_GC_WORDS_COPIED(size);
1238 /* Find out where we're going, using the handy "to" pointer in
1239 * the step of the source object. If it turns out we need to
1240 * evacuate to an older generation, adjust it here (see comment
1243 if (stp->gen_no < evac_gen) {
1244 #ifdef NO_EAGER_PROMOTION
1245 failed_to_evac = rtsTrue;
1247 stp = &generations[evac_gen].steps[0];
1251 /* chain a new block onto the to-space for the destination step if
1254 if (stp->hp + size >= stp->hpLim) {
1258 for(to = stp->hp, from = (P_)src; size>0; --size) {
1264 upd_evacuee(src,(StgClosure *)dest);
1265 return (StgClosure *)dest;
1268 /* Special version of copy() for when we only want to copy the info
1269 * pointer of an object, but reserve some padding after it. This is
1270 * used to optimise evacuation of BLACKHOLEs.
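 *
 * For example, the blackhole cases in evacuate() below call
 *
 *	copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * copying just the header while reserving a full blackhole's worth of
 * room in to-space.
 */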
1274 static __inline__ StgClosure *
1275 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1279 TICK_GC_WORDS_COPIED(size_to_copy);
1280 if (stp->gen_no < evac_gen) {
1281 #ifdef NO_EAGER_PROMOTION
1282 failed_to_evac = rtsTrue;
1284 stp = &generations[evac_gen].steps[0];
1288 if (stp->hp + size_to_reserve >= stp->hpLim) {
1292 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1297 stp->hp += size_to_reserve;
1298 upd_evacuee(src,(StgClosure *)dest);
1299 return (StgClosure *)dest;
1303 /* -----------------------------------------------------------------------------
1304 Evacuate a large object
1306 This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
1308 new_large_objects list, from where it will be scavenged later.
1310 Convention: bd->flags has BF_EVACUATED set for a large object
1311 that has been evacuated, or unset otherwise.
1312 -------------------------------------------------------------------------- */
1316 evacuate_large(StgPtr p)
1318 bdescr *bd = Bdescr(p);
1321 // should point to the beginning of the block
1322 ASSERT(((W_)p & BLOCK_MASK) == 0);
1324 // already evacuated?
1325 if (bd->flags & BF_EVACUATED) {
1326 /* Don't forget to set the failed_to_evac flag if we didn't get
1327 * the desired destination (see comments in evacuate()).
1329 if (bd->gen_no < evac_gen) {
1330 failed_to_evac = rtsTrue;
1331 TICK_GC_FAILED_PROMOTION();
1337 // remove from large_object list
1339 bd->u.back->link = bd->link;
1340 } else { // first object in the list
1341 stp->large_objects = bd->link;
1344 bd->link->u.back = bd->u.back;
1347 /* link it on to the evacuated large object list of the destination step
1350 if (stp->gen_no < evac_gen) {
1351 #ifdef NO_EAGER_PROMOTION
1352 failed_to_evac = rtsTrue;
1354 stp = &generations[evac_gen].steps[0];
1359 bd->gen_no = stp->gen_no;
1360 bd->link = stp->new_large_objects;
1361 stp->new_large_objects = bd;
1362 bd->flags |= BF_EVACUATED;
1365 /* -----------------------------------------------------------------------------
1366 Adding a MUT_CONS to an older generation.
1368 This is necessary from time to time when we end up with an
1369 old-to-new generation pointer in a non-mutable object. We defer
1370 the promotion until the next GC.
1371 -------------------------------------------------------------------------- */
1375 mkMutCons(StgClosure *ptr, generation *gen)
1380 stp = &gen->steps[0];
1382 /* chain a new block onto the to-space for the destination step if
1385 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1389 q = (StgMutVar *)stp->hp;
1390 stp->hp += sizeofW(StgMutVar);
1392 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1394 recordOldToNewPtrs((StgMutClosure *)q);
1396 return (StgClosure *)q;
1399 /* -----------------------------------------------------------------------------
1402 This is called (eventually) for every live object in the system.
1404 The caller to evacuate specifies a desired generation in the
1405 evac_gen global variable. The following conditions apply to
1406 evacuating an object which resides in generation M when we're
   collecting up to generation N:

       if  M >= evac_gen
           if  M > N             do nothing
           else                  evac to step->to

       if  M <  evac_gen         evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in:

       if  M >= evac_gen         do nothing
       if  M <  evac_gen         set the failed_to_evac flag to indicate
                                 that we didn't manage to evacuate this
                                 object into evac_gen.
1422 -------------------------------------------------------------------------- */
1425 evacuate(StgClosure *q)
1430 const StgInfoTable *info;
1433 if (HEAP_ALLOCED(q)) {
1436 if (bd->gen_no > N) {
1437 /* Can't evacuate this object, because it's in a generation
1438 * older than the ones we're collecting. Let's hope that it's
1439 * in evac_gen or older, or we will have to arrange to track
1440 * this pointer using the mutable list.
1442 if (bd->gen_no < evac_gen) {
1444 failed_to_evac = rtsTrue;
1445 TICK_GC_FAILED_PROMOTION();
1450 /* evacuate large objects by re-linking them onto a different list.
1452 if (bd->flags & BF_LARGE) {
1454 if (info->type == TSO &&
1455 ((StgTSO *)q)->what_next == ThreadRelocated) {
1456 q = (StgClosure *)((StgTSO *)q)->link;
1459 evacuate_large((P_)q);
1463 /* If the object is in a step that we're compacting, then we
1464 * need to use an alternative evacuate procedure.
1466 if (bd->step->is_compacted) {
1467 if (!is_marked((P_)q,bd)) {
1469 if (mark_stack_full()) {
1470 barf("ToDo: mark stack full");
1472 push_mark_stack((P_)q);
1480 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1483 // make sure the info pointer is into text space
1484 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1485 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1488 switch (info -> type) {
1492 to = copy(q,sizeW_fromITBL(info),stp);
1497 StgWord w = (StgWord)q->payload[0];
1498 if (q->header.info == Czh_con_info &&
1499 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1500 (StgChar)w <= MAX_CHARLIKE) {
1501 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1503 if (q->header.info == Izh_con_info &&
1504 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1505 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1507 // else, fall through ...
1513 return copy(q,sizeofW(StgHeader)+1,stp);
1515 case THUNK_1_0: // here because of MIN_UPD_SIZE
1520 #ifdef NO_PROMOTE_THUNKS
1521 if (bd->gen_no == 0 &&
1522 bd->step->no != 0 &&
1523 bd->step->no == generations[bd->gen_no].n_steps-1) {
1527 return copy(q,sizeofW(StgHeader)+2,stp);
1535 return copy(q,sizeofW(StgHeader)+2,stp);
1541 case IND_OLDGEN_PERM:
1546 return copy(q,sizeW_fromITBL(info),stp);
1549 case SE_CAF_BLACKHOLE:
1552 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1555 to = copy(q,BLACKHOLE_sizeW(),stp);
1558 case THUNK_SELECTOR:
1560 const StgInfoTable* selectee_info;
1561 StgClosure* selectee = ((StgSelector*)q)->selectee;
1564 selectee_info = get_itbl(selectee);
1565 switch (selectee_info->type) {
1574 StgWord offset = info->layout.selector_offset;
1576 // check that the size is in range
1578 (StgWord32)(selectee_info->layout.payload.ptrs +
1579 selectee_info->layout.payload.nptrs));
1581 // perform the selection!
1582 q = selectee->payload[offset];
1584 /* if we're already in to-space, there's no need to continue
1585 * with the evacuation, just update the source address with
1586 * a pointer to the (evacuated) constructor field.
1588 if (HEAP_ALLOCED(q)) {
1589 bdescr *bd = Bdescr((P_)q);
1590 if (bd->flags & BF_EVACUATED) {
1591 if (bd->gen_no < evac_gen) {
1592 failed_to_evac = rtsTrue;
1593 TICK_GC_FAILED_PROMOTION();
1599 /* otherwise, carry on and evacuate this constructor field,
1600 * (but not the constructor itself)
1609 case IND_OLDGEN_PERM:
1610 selectee = ((StgInd *)selectee)->indirectee;
1614 selectee = ((StgEvacuated *)selectee)->evacuee;
1617 case THUNK_SELECTOR:
1619 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1620 something) to go into an infinite loop when the nightly
1621 stage2 compiles PrelTup.lhs. */
1623 /* we can't recurse indefinitely in evacuate(), so set a
1624 * limit on the number of times we can go around this
1627 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1629 bd = Bdescr((P_)selectee);
	    if (!(bd->flags & BF_EVACUATED)) {
1631 thunk_selector_depth++;
1632 selectee = evacuate(selectee);
1633 thunk_selector_depth--;
1637 // otherwise, fall through...
1649 case SE_CAF_BLACKHOLE:
1653 // not evaluated yet
1657 // a copy of the top-level cases below
1658 case RBH: // cf. BLACKHOLE_BQ
1660 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1661 to = copy(q,BLACKHOLE_sizeW(),stp);
1662 //ToDo: derive size etc from reverted IP
1663 //to = copy(q,size,stp);
1664 // recordMutable((StgMutClosure *)to);
1669 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1670 to = copy(q,sizeofW(StgBlockedFetch),stp);
      ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1678 to = copy(q,sizeofW(StgFetchMe),stp);
      ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1683 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1688 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1689 (int)(selectee_info->type));
1692 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1696 // follow chains of indirections, don't evacuate them
1697 q = ((StgInd*)q)->indirectee;
1701 if (info->srt_len > 0 && major_gc &&
1702 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1703 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1704 static_objects = (StgClosure *)q;
1709 if (info->srt_len > 0 && major_gc &&
1710 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1711 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1712 static_objects = (StgClosure *)q;
1717 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1718 * on the CAF list, so don't do anything with it here (we'll
1719 * scavenge it later).
1722 && ((StgIndStatic *)q)->saved_info == NULL
1723 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1724 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1725 static_objects = (StgClosure *)q;
1730 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1731 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1732 static_objects = (StgClosure *)q;
1736 case CONSTR_INTLIKE:
1737 case CONSTR_CHARLIKE:
1738 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
1754 // shouldn't see these
1755 barf("evacuate: stack frame at %p\n", q);
1759 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1760 * of stack, tagging and all.
1762 return copy(q,pap_sizeW((StgPAP*)q),stp);
1765 /* Already evacuated, just return the forwarding address.
1766 * HOWEVER: if the requested destination generation (evac_gen) is
1767 * older than the actual generation (because the object was
1768 * already evacuated to a younger generation) then we have to
1769 * set the failed_to_evac flag to indicate that we couldn't
1770 * manage to promote the object to the desired generation.
1772 if (evac_gen > 0) { // optimisation
1773 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1774 if (Bdescr((P_)p)->gen_no < evac_gen) {
1775 failed_to_evac = rtsTrue;
1776 TICK_GC_FAILED_PROMOTION();
1779 return ((StgEvacuated*)q)->evacuee;
1782 // just copy the block
1783 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1786 case MUT_ARR_PTRS_FROZEN:
1787 // just copy the block
1788 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1792 StgTSO *tso = (StgTSO *)q;
1794 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1796 if (tso->what_next == ThreadRelocated) {
1797 q = (StgClosure *)tso->link;
      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
1805 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1806 move_TSO(tso, new_tso);
1807 return (StgClosure *)new_tso;
1812 case RBH: // cf. BLACKHOLE_BQ
1814 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1815 to = copy(q,BLACKHOLE_sizeW(),stp);
1816 //ToDo: derive size etc from reverted IP
1817 //to = copy(q,size,stp);
1819 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1820 q, info_type(q), to, info_type(to)));
1825 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1826 to = copy(q,sizeofW(StgBlockedFetch),stp);
1828 belch("@@ evacuate: %p (%s) to %p (%s)",
1829 q, info_type(q), to, info_type(to)));
      ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1837 to = copy(q,sizeofW(StgFetchMe),stp);
1839 belch("@@ evacuate: %p (%s) to %p (%s)",
1840 q, info_type(q), to, info_type(to)));
      ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1845 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1847 belch("@@ evacuate: %p (%s) to %p (%s)",
1848 q, info_type(q), to, info_type(to)));
1853 barf("evacuate: strange closure type %d", (int)(info->type));
1859 /* -----------------------------------------------------------------------------
1860 move_TSO is called to update the TSO structure after it has been
1861 moved from one place to another.
1862 -------------------------------------------------------------------------- */
1865 move_TSO(StgTSO *src, StgTSO *dest)
1869 // relocate the stack pointers...
1870 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1871 dest->sp = (StgPtr)dest->sp + diff;
1872 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1874 relocate_stack(dest, diff);
1877 /* -----------------------------------------------------------------------------
1878 relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
1881 -------------------------------------------------------------------------- */
1884 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1892 while ((P_)su < dest->stack + dest->stack_size) {
1893 switch (get_itbl(su)->type) {
1895 // GCC actually manages to common up these three cases!
1898 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1903 cf = (StgCatchFrame *)su;
1904 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1909 sf = (StgSeqFrame *)su;
1910 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1919 barf("relocate_stack %d", (int)(get_itbl(su)->type));
1930 scavenge_srt(const StgInfoTable *info)
1932 StgClosure **srt, **srt_end;
1934 /* evacuate the SRT. If srt_len is zero, then there isn't an
1935 * srt field in the info table. That's ok, because we'll
1936 * never dereference it.
1938 srt = (StgClosure **)(info->srt);
1939 srt_end = srt + info->srt_len;
1940 for (; srt < srt_end; srt++) {
1941 /* Special-case to handle references to closures hiding out in DLLs, since
     double indirections are required to get at them.  The code generator knows
1943 which is which when generating the SRT, so it stores the (indirect)
1944 reference to the DLL closure in the table by first adding one to it.
1945 We check for this here, and undo the addition before evacuating it.
1947 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1948 closure that's fixed at link-time, and no extra magic is required.
1950 #ifdef ENABLE_WIN32_DLL_SUPPORT
1951 if ( (unsigned long)(*srt) & 0x1 ) {
1952 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1962 /* -----------------------------------------------------------------------------
1964 -------------------------------------------------------------------------- */
1967 scavengeTSO (StgTSO *tso)
1969 // chase the link field for any TSOs on the same queue
1970 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1971 if ( tso->why_blocked == BlockedOnMVar
1972 || tso->why_blocked == BlockedOnBlackHole
1973 || tso->why_blocked == BlockedOnException
1975 || tso->why_blocked == BlockedOnGA
1976 || tso->why_blocked == BlockedOnGA_NoSend
1979 tso->block_info.closure = evacuate(tso->block_info.closure);
1981 if ( tso->blocked_exceptions != NULL ) {
1982 tso->blocked_exceptions =
1983 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1985 // scavenge this thread's stack
1986 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1989 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.
1993 evac_gen is set by the caller to be either zero (for a step in a
1994 generation < N) or G where G is the generation of the step being
1997 We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
2000 -------------------------------------------------------------------------- */
2008 nat saved_evac_gen = evac_gen;
2013 failed_to_evac = rtsFalse;
2015 /* scavenge phase - standard breadth-first scavenging of the
2019 while (bd != stp->hp_bd || p < stp->hp) {
2021 // If we're at the end of this block, move on to the next block
2022 if (bd != stp->hp_bd && p == bd->free) {
2028 info = get_itbl((StgClosure *)p);
2029 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2032 switch (info->type) {
2035 /* treat MVars specially, because we don't want to evacuate the
2036 * mut_link field in the middle of the closure.
2039 StgMVar *mvar = ((StgMVar *)p);
2041 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2042 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2043 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2044 evac_gen = saved_evac_gen;
2045 recordMutable((StgMutClosure *)mvar);
2046 failed_to_evac = rtsFalse; // mutable.
2047 p += sizeofW(StgMVar);
2055 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2056 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2057 p += sizeofW(StgHeader) + 2;
2062 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2063 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2069 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2070 p += sizeofW(StgHeader) + 1;
2075 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2081 p += sizeofW(StgHeader) + 1;
2088 p += sizeofW(StgHeader) + 2;
2095 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2096 p += sizeofW(StgHeader) + 2;
2112 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2113 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2114 (StgClosure *)*p = evacuate((StgClosure *)*p);
2116 p += info->layout.payload.nptrs;
2121 if (stp->gen_no != 0) {
2122 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2125 case IND_OLDGEN_PERM:
2126 ((StgIndOldGen *)p)->indirectee =
2127 evacuate(((StgIndOldGen *)p)->indirectee);
2128 if (failed_to_evac) {
2129 failed_to_evac = rtsFalse;
2130 recordOldToNewPtrs((StgMutClosure *)p);
2132 p += sizeofW(StgIndOldGen);
2137 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2138 evac_gen = saved_evac_gen;
2139 recordMutable((StgMutClosure *)p);
2140 failed_to_evac = rtsFalse; // mutable anyhow
2141 p += sizeofW(StgMutVar);
2146 failed_to_evac = rtsFalse; // mutable anyhow
2147 p += sizeofW(StgMutVar);
2151 case SE_CAF_BLACKHOLE:
2154 p += BLACKHOLE_sizeW();
2159 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2160 (StgClosure *)bh->blocking_queue =
2161 evacuate((StgClosure *)bh->blocking_queue);
2162 recordMutable((StgMutClosure *)bh);
2163 failed_to_evac = rtsFalse;
2164 p += BLACKHOLE_sizeW();
2168 case THUNK_SELECTOR:
2170 StgSelector *s = (StgSelector *)p;
2171 s->selectee = evacuate(s->selectee);
2172 p += THUNK_SELECTOR_sizeW();
2176 case AP_UPD: // same as PAPs
2178 /* Treat a PAP just like a section of stack, not forgetting to
2179 * evacuate the function pointer too...
2182 StgPAP* pap = (StgPAP *)p;
2184 pap->fun = evacuate(pap->fun);
2185 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2186 p += pap_sizeW(pap);
2191 // nothing to follow
2192 p += arr_words_sizeW((StgArrWords *)p);
2196 // follow everything
2200 evac_gen = 0; // repeatedly mutable
2201 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2202 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2203 (StgClosure *)*p = evacuate((StgClosure *)*p);
2205 evac_gen = saved_evac_gen;
2206 recordMutable((StgMutClosure *)q);
2207 failed_to_evac = rtsFalse; // mutable anyhow.
2211 case MUT_ARR_PTRS_FROZEN:
2212 // follow everything
2216 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2217 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2218 (StgClosure *)*p = evacuate((StgClosure *)*p);
2220 // it's tempting to recordMutable() if failed_to_evac is
2221 // false, but that breaks some assumptions (eg. every
2222 // closure on the mutable list is supposed to have the MUT
2223 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2229 StgTSO *tso = (StgTSO *)p;
2232 evac_gen = saved_evac_gen;
2233 recordMutable((StgMutClosure *)tso);
2234 failed_to_evac = rtsFalse; // mutable anyhow.
2235 p += tso_sizeW(tso);
2240 case RBH: // cf. BLACKHOLE_BQ
2243 nat size, ptrs, nonptrs, vhs;
2245 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2247 StgRBH *rbh = (StgRBH *)p;
2248 (StgClosure *)rbh->blocking_queue =
2249 evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
2251 failed_to_evac = rtsFalse; // mutable anyhow.
2253 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2254 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2255 // ToDo: use size of reverted closure here!
2256 p += BLACKHOLE_sizeW();
2262 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2263 // follow the pointer to the node which is being demanded
2264 (StgClosure *)bf->node =
2265 evacuate((StgClosure *)bf->node);
2266 // follow the link to the rest of the blocking queue
2267 (StgClosure *)bf->link =
2268 evacuate((StgClosure *)bf->link);
2269 if (failed_to_evac) {
2270 failed_to_evac = rtsFalse;
2271 recordMutable((StgMutClosure *)bf);
2274 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2275 bf, info_type((StgClosure *)bf),
2276 bf->node, info_type(bf->node)));
2277 p += sizeofW(StgBlockedFetch);
2285 p += sizeofW(StgFetchMe);
2286 break; // nothing to do in this case
2288 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2290 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2291 (StgClosure *)fmbq->blocking_queue =
2292 evacuate((StgClosure *)fmbq->blocking_queue);
2293 if (failed_to_evac) {
2294 failed_to_evac = rtsFalse;
2295 recordMutable((StgMutClosure *)fmbq);
2298 belch("@@ scavenge: %p (%s) exciting, isn't it",
2299 p, info_type((StgClosure *)p)));
2300 p += sizeofW(StgFetchMeBlockingQueue);
2306 barf("scavenge: unimplemented/strange closure type %d @ %p",
2310 /* If we didn't manage to promote all the objects pointed to by
2311 * the current object, then we have to designate this object as
2312 * mutable (because it contains old-to-new generation pointers).
2314 if (failed_to_evac) {
2315 failed_to_evac = rtsFalse;
2316 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2324 /* -----------------------------------------------------------------------------
2325 Scavenge everything on the mark stack.
2327 This is slightly different from scavenge():
2328 - we don't walk linearly through the objects, so the scavenger
2329 doesn't need to advance the pointer on to the next object.
2330 -------------------------------------------------------------------------- */
2333 scavenge_mark_stack(void)
2339 evac_gen = oldest_gen->no;
2340 saved_evac_gen = evac_gen;
2342 while (!mark_stack_empty()) {
2343 p = pop_mark_stack();
2345 info = get_itbl((StgClosure *)p);
2346 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2348 switch (info->type) {
2351 /* treat MVars specially, because we don't want to evacuate the
2352 * mut_link field in the middle of the closure.
2355 StgMVar *mvar = ((StgMVar *)p);
2357 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2358 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2359 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2360 evac_gen = saved_evac_gen;
2361 failed_to_evac = rtsFalse; // mutable.
2369 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2370 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2380 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2405 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2406 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2407 (StgClosure *)*p = evacuate((StgClosure *)*p);
2413 // don't need to do anything here: the only possible case
2414 // is that we're in a 1-space compacting collector, with
2415 // no "old" generation.
2419 case IND_OLDGEN_PERM:
2420 ((StgIndOldGen *)p)->indirectee =
2421 evacuate(((StgIndOldGen *)p)->indirectee);
2422 if (failed_to_evac) {
2423 recordOldToNewPtrs((StgMutClosure *)p);
2425 failed_to_evac = rtsFalse;
2430 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2431 evac_gen = saved_evac_gen;
2432 failed_to_evac = rtsFalse;
2437 failed_to_evac = rtsFalse;
2441 case SE_CAF_BLACKHOLE:
2449 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2450 (StgClosure *)bh->blocking_queue =
2451 evacuate((StgClosure *)bh->blocking_queue);
2452 failed_to_evac = rtsFalse;
2456 case THUNK_SELECTOR:
2458 StgSelector *s = (StgSelector *)p;
2459 s->selectee = evacuate(s->selectee);
2463 case AP_UPD: // same as PAPs
2465 /* Treat a PAP just like a section of stack, not forgetting to
2466 * evacuate the function pointer too...
2469 StgPAP* pap = (StgPAP *)p;
2471 pap->fun = evacuate(pap->fun);
2472 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2477 // follow everything
2481 evac_gen = 0; // repeatedly mutable
2482 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2483 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2484 (StgClosure *)*p = evacuate((StgClosure *)*p);
2486 evac_gen = saved_evac_gen;
2487 failed_to_evac = rtsFalse; // mutable anyhow.
2491 case MUT_ARR_PTRS_FROZEN:
2492 // follow everything
2496 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2497 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2498 (StgClosure *)*p = evacuate((StgClosure *)*p);
2505 StgTSO *tso = (StgTSO *)p;
2508 evac_gen = saved_evac_gen;
2509 failed_to_evac = rtsFalse;
2514 case RBH: // cf. BLACKHOLE_BQ
2517 nat size, ptrs, nonptrs, vhs;
2519 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2521 StgRBH *rbh = (StgRBH *)p;
2522 (StgClosure *)rbh->blocking_queue =
2523 evacuate((StgClosure *)rbh->blocking_queue);
2524 recordMutable((StgMutClosure *)rbh);
2525 failed_to_evac = rtsFalse; // mutable anyhow.
2527 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2528 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2534 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2535 // follow the pointer to the node which is being demanded
2536 (StgClosure *)bf->node =
2537 evacuate((StgClosure *)bf->node);
2538 // follow the link to the rest of the blocking queue
2539 (StgClosure *)bf->link =
2540 evacuate((StgClosure *)bf->link);
2541 if (failed_to_evac) {
2542 failed_to_evac = rtsFalse;
2543 recordMutable((StgMutClosure *)bf);
2546 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2547 bf, info_type((StgClosure *)bf),
2548 bf->node, info_type(bf->node)));
2556 break; // nothing to do in this case
2558 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2560 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2561 (StgClosure *)fmbq->blocking_queue =
2562 evacuate((StgClosure *)fmbq->blocking_queue);
2563 if (failed_to_evac) {
2564 failed_to_evac = rtsFalse;
2565 recordMutable((StgMutClosure *)fmbq);
2568 belch("@@ scavenge: %p (%s) exciting, isn't it",
2569 p, info_type((StgClosure *)p)));
2575 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2579 if (failed_to_evac) {
2580 failed_to_evac = rtsFalse;
2581 mkMutCons((StgClosure *)p, &generations[evac_gen]);
2584 } // while (!mark_stack_empty())
2587 /* -----------------------------------------------------------------------------
2588 Scavenge one object.
2590 This is used for objects that are temporarily marked as mutable
2591 because they contain old-to-new generation pointers. Only certain
2592 objects can have this property.
2593 -------------------------------------------------------------------------- */
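/* scavenge_one() returns rtsTrue if some pointer in the object could
 * not be promoted to the desired generation (i.e. failed_to_evac was
 * set); the caller then keeps the object on the mutable list.  See the
 * MUT_CONS case in scavenge_mut_once_list() below for a call site.
 */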
2596 scavenge_one(StgClosure *p)
2598 const StgInfoTable *info;
2601 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2602 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2606 switch (info->type) {
2609 case FUN_1_0: // hardly worth specialising these guys
2629 case IND_OLDGEN_PERM:
2633 end = (P_)p->payload + info->layout.payload.ptrs;
2634 for (q = (P_)p->payload; q < end; q++) {
2635 (StgClosure *)*q = evacuate((StgClosure *)*q);
2641 case SE_CAF_BLACKHOLE:
2646 case THUNK_SELECTOR:
2648 StgSelector *s = (StgSelector *)p;
2649 s->selectee = evacuate(s->selectee);
2653 case AP_UPD: /* same as PAPs */
2655 /* Treat a PAP just like a section of stack, not forgetting to
2656 * evacuate the function pointer too...
2659 StgPAP* pap = (StgPAP *)p;
2661 pap->fun = evacuate(pap->fun);
2662 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2667 /* This might happen if, for instance, a MUT_CONS was pointing to a
2668 * THUNK which has since been updated.  The IND_OLDGEN will
2669 * be on the mutable list anyway, so we don't need to do anything
2670 * here.
2671 */
2674 case MUT_ARR_PTRS_FROZEN:
2676 // follow everything
2680 next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2681 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
2682 (StgClosure *)*q = evacuate((StgClosure *)*q);
2688 barf("scavenge_one: strange object %d", (int)(info->type));
2691 no_luck = failed_to_evac;
2692 failed_to_evac = rtsFalse;
2696 /* -----------------------------------------------------------------------------
2697 Scavenging mutable lists.
2699 We treat the mutable list of each generation > N (i.e. all the
2700 generations older than the one being collected) as roots. We also
2701 remove non-mutable objects from the mutable list at this point.
2702 -------------------------------------------------------------------------- */
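/* For orientation: the collector invokes these functions on the older
 * generations roughly as in the following sketch (illustrative, not a
 * verbatim excerpt from the caller); N is the oldest generation being
 * collected.
 */
#if 0
    { nat g;
      for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
          scavenge_mut_once_list(&generations[g]);
          scavenge_mutable_list(&generations[g]);
      }
    }
#endif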
2705 scavenge_mut_once_list(generation *gen)
2707 const StgInfoTable *info;
2708 StgMutClosure *p, *next, *new_list;
2710 p = gen->mut_once_list;
2711 new_list = END_MUT_LIST;
2715 failed_to_evac = rtsFalse;
2717 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2719 // make sure the info pointer is into text space
2720 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2721 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2725 if (info->type==RBH)
2726 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2728 switch(info->type) {
2731 case IND_OLDGEN_PERM:
2733 /* Try to pull the indirectee into this generation, so we can
2734 * remove the indirection from the mutable list.
2736 ((StgIndOldGen *)p)->indirectee =
2737 evacuate(((StgIndOldGen *)p)->indirectee);
2739 #if 0 && defined(DEBUG)
2740 if (RtsFlags.DebugFlags.gc)
2741 /* Debugging code to print out the size of the thing we just
2742 * evacuated */
2745 StgPtr start = gen->steps[0].scan;
2746 bdescr *start_bd = gen->steps[0].scan_bd;
2748 scavenge(&gen->steps[0]);
2749 if (start_bd != gen->steps[0].scan_bd) {
2750 size += (P_)BLOCK_ROUND_UP(start) - start;
2751 start_bd = start_bd->link;
2752 while (start_bd != gen->steps[0].scan_bd) {
2753 size += BLOCK_SIZE_W;
2754 start_bd = start_bd->link;
2756 size += gen->steps[0].scan -
2757 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2759 size = gen->steps[0].scan - start;
2761 fprintf(stderr,"evac IND_OLDGEN: %ld bytes\n", size * sizeof(W_));
2765 /* failed_to_evac might happen if we've got more than two
2766 * generations, we're collecting only generation 0, the
2767 * indirection resides in generation 2 and the indirectee is
2768 * in generation 1.
2769 */
2770 if (failed_to_evac) {
2771 failed_to_evac = rtsFalse;
2772 p->mut_link = new_list;
2775 /* the mut_link field of an IND_STATIC is overloaded as the
2776 * static link field too (it just so happens that we don't need
2777 * both at the same time), so we need to NULL it out when
2778 * removing this object from the mutable list because the static
2779 * link fields are all assumed to be NULL before doing a major
2780 * GC.
2781 */
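/* In the remove case, the NULLing-out described above amounts to the
 * one-liner (illustrative):
 *
 *     p->mut_link = NULL;
 */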
2787 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2788 * it from the mutable list if possible, by promoting whatever it
2789 * points to.
2790 */
2791 if (scavenge_one((StgClosure *)((StgMutVar *)p)->var)) {
2792 /* didn't manage to promote everything, so put the
2793 * MUT_CONS back on the list.
2795 p->mut_link = new_list;
2801 // shouldn't have anything else on the mutables list
2802 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2806 gen->mut_once_list = new_list;
2811 scavenge_mutable_list(generation *gen)
2813 const StgInfoTable *info;
2814 StgMutClosure *p, *next;
2816 p = gen->saved_mut_list;
2820 failed_to_evac = rtsFalse;
2822 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2824 // make sure the info pointer is into text space
2825 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2826 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2830 if (info->type==RBH)
2831 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2833 switch(info->type) {
2836 // follow everything
2837 p->mut_link = gen->mut_list;
2842 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2843 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2844 (StgClosure *)*q = evacuate((StgClosure *)*q);
2849 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2850 case MUT_ARR_PTRS_FROZEN:
2855 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2856 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2857 (StgClosure *)*q = evacuate((StgClosure *)*q);
2861 if (failed_to_evac) {
2862 failed_to_evac = rtsFalse;
2863 mkMutCons((StgClosure *)p, gen);
2869 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2870 p->mut_link = gen->mut_list;
2876 StgMVar *mvar = (StgMVar *)p;
2877 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2878 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2879 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2880 p->mut_link = gen->mut_list;
2887 StgTSO *tso = (StgTSO *)p;
2891 /* Don't take this TSO off the mutable list - it might still
2892 * point to some younger objects (because we set evac_gen to 0
2893 * above).
2894 */
2895 tso->mut_link = gen->mut_list;
2896 gen->mut_list = (StgMutClosure *)tso;
2902 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2903 (StgClosure *)bh->blocking_queue =
2904 evacuate((StgClosure *)bh->blocking_queue);
2905 p->mut_link = gen->mut_list;
2910 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2911 */
2912 case IND_OLDGEN:
2913 case IND_OLDGEN_PERM:
2914 /* Try to pull the indirectee into this generation, so we can
2915 * remove the indirection from the mutable list.
2918 ((StgIndOldGen *)p)->indirectee =
2919 evacuate(((StgIndOldGen *)p)->indirectee);
2922 if (failed_to_evac) {
2923 failed_to_evac = rtsFalse;
2924 p->mut_link = gen->mut_once_list;
2925 gen->mut_once_list = p;
2932 // HWL: check whether all of these are necessary
2934 case RBH: // cf. BLACKHOLE_BQ
2936 // nat size, ptrs, nonptrs, vhs;
2938 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2939 StgRBH *rbh = (StgRBH *)p;
2940 (StgClosure *)rbh->blocking_queue =
2941 evacuate((StgClosure *)rbh->blocking_queue);
2942 if (failed_to_evac) {
2943 failed_to_evac = rtsFalse;
2944 recordMutable((StgMutClosure *)rbh);
2946 // ToDo: use size of reverted closure here!
2947 p += BLACKHOLE_sizeW();
2953 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2954 // follow the pointer to the node which is being demanded
2955 (StgClosure *)bf->node =
2956 evacuate((StgClosure *)bf->node);
2957 // follow the link to the rest of the blocking queue
2958 (StgClosure *)bf->link =
2959 evacuate((StgClosure *)bf->link);
2960 if (failed_to_evac) {
2961 failed_to_evac = rtsFalse;
2962 recordMutable((StgMutClosure *)bf);
2964 p += sizeofW(StgBlockedFetch);
2970 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
2973 p += sizeofW(StgFetchMe);
2974 break; // nothing to do in this case
2976 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2978 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2979 (StgClosure *)fmbq->blocking_queue =
2980 evacuate((StgClosure *)fmbq->blocking_queue);
2981 if (failed_to_evac) {
2982 failed_to_evac = rtsFalse;
2983 recordMutable((StgMutClosure *)fmbq);
2985 p += sizeofW(StgFetchMeBlockingQueue);
2991 // shouldn't have anything else on the mutables list
2992 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
2999 scavenge_static(void)
3001 StgClosure* p = static_objects;
3002 const StgInfoTable *info;
3004 /* Always evacuate straight to the oldest generation for static
3005 * objects */
3006 evac_gen = oldest_gen->no;
3008 /* keep going until we've scavenged all the objects on the linked
3009 * list... */
3010 while (p != END_OF_STATIC_LIST) {
3014 if (info->type==RBH)
3015 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3017 // make sure the info pointer is into text space
3018 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3019 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3021 /* Take this object *off* the static_objects list,
3022 * and put it on the scavenged_static_objects list.
3024 static_objects = STATIC_LINK(info,p);
3025 STATIC_LINK(info,p) = scavenged_static_objects;
3026 scavenged_static_objects = p;
3028 switch (info->type) {
3032 StgInd *ind = (StgInd *)p;
3033 ind->indirectee = evacuate(ind->indirectee);
3035 /* might fail to evacuate it, in which case we have to pop it
3036 * back on the mutable list (and take it off the
3037 * scavenged_static list because the static link and mut link
3038 * pointers are one and the same).
3040 if (failed_to_evac) {
3041 failed_to_evac = rtsFalse;
3042 scavenged_static_objects = STATIC_LINK(info,p);
3043 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3044 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3058 next = (P_)p->payload + info->layout.payload.ptrs;
3059 // evacuate the pointers
3060 for (q = (P_)p->payload; q < next; q++) {
3061 (StgClosure *)*q = evacuate((StgClosure *)*q);
3067 barf("scavenge_static: strange closure %d", (int)(info->type));
3070 ASSERT(failed_to_evac == rtsFalse);
3072 /* get the next static object from the list. Remember, there might
3073 * be more stuff on this list now that we've done some evacuating!
3074 * (static_objects is a global)
3080 /* -----------------------------------------------------------------------------
3081 scavenge_stack walks over a section of stack and evacuates all the
3082 objects pointed to by it. We can use the same code for walking
3083 PAPs, since these are just sections of copied stack.
3084 -------------------------------------------------------------------------- */
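/* Usage note: PAPs really are walked with this very function; compare
 * the AP_UPD/PAP cases above, which call
 *
 *     scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
 */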
3087 scavenge_stack(StgPtr p, StgPtr stack_end)
3090 const StgInfoTable* info;
3093 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3096 * Each time around this loop, we are looking at a chunk of stack
3097 * that starts with either a pending argument section or an
3098 * activation record.
3101 while (p < stack_end) {
3104 // If we've got a tag, skip over that many words on the stack
3105 if (IS_ARG_TAG((W_)q)) {
3110 /* Is q a pointer to a closure?
3112 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3114 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3115 ASSERT(closure_STATIC((StgClosure *)q));
3117 // otherwise, must be a pointer into the allocation space.
3120 (StgClosure *)*p = evacuate((StgClosure *)q);
3126 * Otherwise, q must be the info pointer of an activation
3127 * record. All activation records have 'bitmap' style layout
3130 info = get_itbl((StgClosure *)p);
3132 switch (info->type) {
3134 // Dynamic bitmap: the mask is stored on the stack
3136 bitmap = ((StgRetDyn *)p)->liveness;
3137 p = (P_)&((StgRetDyn *)p)->payload[0];
3140 // probably a slow-entry point return address:
3148 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3149 old_p, p, old_p+1));
3151 p++; // what if FHS!=1 !? -- HWL
3156 /* Specialised code for update frames, since they're so common.
3157 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3158 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3162 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3164 p += sizeofW(StgUpdateFrame);
3167 frame->updatee = evacuate(frame->updatee);
3169 #else // specialised code for update frames, not sure if it's worth it.
3171 nat type = get_itbl(frame->updatee)->type;
3173 if (type == EVACUATED) {
3174 frame->updatee = evacuate(frame->updatee);
3177 bdescr *bd = Bdescr((P_)frame->updatee);
3179 if (bd->gen_no > N) {
3180 if (bd->gen_no < evac_gen) {
3181 failed_to_evac = rtsTrue;
3186 // Don't promote blackholes
3188 if (!(stp->gen_no == 0 &&
3190 stp->no == stp->gen->n_steps-1)) {
3197 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3198 sizeofW(StgHeader), stp);
3199 frame->updatee = to;
3202 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3203 frame->updatee = to;
3204 recordMutable((StgMutClosure *)to);
3207 /* will never be SE_{,CAF_}BLACKHOLE, since we
3208 don't push an update frame for single-entry thunks. KSW 1999-01. */
3209 barf("scavenge_stack: UPDATE_FRAME updatee");
3215 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3222 bitmap = info->layout.bitmap;
3224 // this assumes that the payload starts immediately after the info-ptr
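// (a 0 bit in the bitmap marks a live pointer to evacuate; 1 bits are
// non-pointer words and are skipped)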
3226 while (bitmap != 0) {
3227 if ((bitmap & 1) == 0) {
3228 (StgClosure *)*p = evacuate((StgClosure *)*p);
3231 bitmap = bitmap >> 1;
3238 // large bitmap (>= 32 entries, or >= 64 on a 64-bit machine)
3243 StgLargeBitmap *large_bitmap;
3246 large_bitmap = info->layout.large_bitmap;
3249 for (i=0; i<large_bitmap->size; i++) {
3250 bitmap = large_bitmap->bitmap[i];
3251 q = p + BITS_IN(W_);
3252 while (bitmap != 0) {
3253 if ((bitmap & 1) == 0) {
3254 (StgClosure *)*p = evacuate((StgClosure *)*p);
3257 bitmap = bitmap >> 1;
3259 if (i+1 < large_bitmap->size) {
3261 (StgClosure *)*p = evacuate((StgClosure *)*p);
3267 // and don't forget to follow the SRT
3272 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3277 /*-----------------------------------------------------------------------------
3278 scavenge the large object list.
3280 evac_gen set by caller; similar games played with evac_gen as with
3281 scavenge() - see comment at the top of scavenge(). Most large
3282 objects are (repeatedly) mutable, so most of the time evac_gen will
3283 be zero.
3284 --------------------------------------------------------------------------- */
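/* For example, the MUT_ARR_PTRS case below brackets its pointer walk
 * with
 *     evac_gen = 0;  ...  evac_gen = saved_evac_gen;
 * and then clears failed_to_evac, because the object is mutable anyhow
 * and stays on the mutable list.
 */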
3287 scavenge_large(step *stp)
3291 const StgInfoTable* info;
3292 nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen
3294 bd = stp->new_large_objects;
3296 for (; bd != NULL; bd = stp->new_large_objects) {
3298 /* take this object *off* the large objects list and put it on
3299 * the scavenged large objects list. This is so that we can
3300 * treat new_large_objects as a stack and push new objects on
3301 * the front when evacuating.
3303 stp->new_large_objects = bd->link;
3304 dbl_link_onto(bd, &stp->scavenged_large_objects);
3306 // update the block count in this step.
3307 stp->n_scavenged_large_blocks += bd->blocks;
3310 info = get_itbl((StgClosure *)p);
3312 // only certain objects can be "large"...
3314 switch (info->type) {
3317 // nothing to follow
3322 // follow everything
3325 evac_gen = 0; // repeatedly mutable
3326 recordMutable((StgMutClosure *)p);
3327 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3328 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3329 (StgClosure *)*p = evacuate((StgClosure *)*p);
3331 evac_gen = saved_evac_gen;
3332 failed_to_evac = rtsFalse;
3336 case MUT_ARR_PTRS_FROZEN:
3338 // follow everything
3341 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3342 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3343 (StgClosure *)*p = evacuate((StgClosure *)*p);
3350 StgTSO *tso = (StgTSO *)p;
3352 evac_gen = 0; // repeatedly mutable
3354 recordMutable((StgMutClosure *)tso);
3355 evac_gen = saved_evac_gen;
3356 failed_to_evac = rtsFalse;
3363 StgPAP* pap = (StgPAP *)p;
3364 pap->fun = evacuate(pap->fun);
3365 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
3370 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
3373 if (failed_to_evac) {
3374 failed_to_evac = rtsFalse;
3375 mkMutCons((StgClosure *)q, &generations[evac_gen]);
3380 /* -----------------------------------------------------------------------------
3381 Initialising the static object & mutable lists
3382 -------------------------------------------------------------------------- */
3385 zero_static_object_list(StgClosure* first_static)
3389 const StgInfoTable *info;
3391 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3393 link = STATIC_LINK(info, p);
3394 STATIC_LINK(info,p) = NULL;
3398 /* This function is only needed because we share the mutable link
3399 * field with the static link field in an IND_STATIC, so we have to
3400 * zero the mut_link field before doing a major GC, which needs the
3401 * static link field.
3403 * It doesn't do any harm to zero all the mutable link fields on the
3404 * mutable list.
3405 */
3408 zero_mutable_list( StgMutClosure *first )
3410 StgMutClosure *next, *c;
3412 for (c = first; c != END_MUT_LIST; c = next) {
3418 /* -----------------------------------------------------------------------------
3419 Reverting CAFs
3420 -------------------------------------------------------------------------- */
3427 for (c = (StgIndStatic *)caf_list; c != NULL;
3428 c = (StgIndStatic *)c->static_link)
3430 c->header.info = c->saved_info;
3431 c->saved_info = NULL;
3432 // could, but not necessary: c->static_link = NULL;
3438 scavengeCAFs( void )
3443 for (c = (StgIndStatic *)caf_list; c != NULL;
3444 c = (StgIndStatic *)c->static_link)
3446 c->indirectee = evacuate(c->indirectee);
3450 /* -----------------------------------------------------------------------------
3451 Sanity code for CAF garbage collection.
3453 With DEBUG turned on, we manage a CAF list in addition to the SRT
3454 mechanism. After GC, we run down the CAF list and blackhole any
3455 CAFs which have been garbage collected. This means we get an error
3456 whenever the program tries to enter a garbage collected CAF.
3458 Any garbage collected CAFs are taken off the CAF list at the same
3460 -------------------------------------------------------------------------- */
3462 #if 0 && defined(DEBUG)
3469 const StgInfoTable *info;
3480 ASSERT(info->type == IND_STATIC);
3482 if (STATIC_LINK(info,p) == NULL) {
3483 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04lx\n", (long)p));
3485 SET_INFO(p,&stg_BLACKHOLE_info);
3486 p = STATIC_LINK2(info,p);
3490 pp = &STATIC_LINK2(info,p);
3497 // fprintf(stderr, "%d CAFs live\n", i);
3502 /* -----------------------------------------------------------------------------
3503 Lazy black holing.
3505 Whenever a thread returns to the scheduler after possibly doing
3506 some work, we have to run down the stack and black-hole all the
3507 closures referred to by update frames.
3508 -------------------------------------------------------------------------- */
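/* The essential operation on each update frame, in isolation
 * (illustrative fragment, not compiled; the real loop below also skips
 * already-blackholed updatees and non-update frames):
 */
#if 0
    SET_INFO(frame->updatee, &stg_BLACKHOLE_info);  // overwrite the updatee's header
#endif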
3511 threadLazyBlackHole(StgTSO *tso)
3513 StgUpdateFrame *update_frame;
3514 StgBlockingQueue *bh;
3517 stack_end = &tso->stack[tso->stack_size];
3518 update_frame = tso->su;
3521 switch (get_itbl(update_frame)->type) {
3524 update_frame = ((StgCatchFrame *)update_frame)->link;
3528 bh = (StgBlockingQueue *)update_frame->updatee;
3530 /* if the thunk is already blackholed, it means we've also
3531 * already blackholed the rest of the thunks on this stack,
3532 * so we can stop early.
3534 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3535 * don't interfere with this optimisation.
3537 if (bh->header.info == &stg_BLACKHOLE_info) {
3541 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3542 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3543 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3544 fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
3546 SET_INFO(bh,&stg_BLACKHOLE_info);
3549 update_frame = update_frame->link;
3553 update_frame = ((StgSeqFrame *)update_frame)->link;
3559 barf("threadLazyBlackHole: strange activation record");
3565 /* -----------------------------------------------------------------------------
3566 * Stack squeezing
3567 *
3568 * Code largely pinched from old RTS, then hacked to bits. We also do
3569 * lazy black holing here.
3571 * -------------------------------------------------------------------------- */
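/* Illustrative before/after for the squeeze performed below.  Two
 * adjacent update frames, for closures A and B, collapse into a single
 * frame updating A; B is overwritten with an indirection to A, and the
 * stack words in between slide up:
 *
 *        before                  after
 *     | ...stuff...  |       | ...stuff...  |
 *     | update  B    |  ==>  | update  A    |    (B := IND -> A)
 *     | update  A    |       |              |
 */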
3574 threadSqueezeStack(StgTSO *tso)
3576 lnat displacement = 0;
3577 StgUpdateFrame *frame;
3578 StgUpdateFrame *next_frame; // Temporally next
3579 StgUpdateFrame *prev_frame; // Temporally previous
3581 rtsBool prev_was_update_frame;
3583 StgUpdateFrame *top_frame;
3584 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3586 void printObj( StgClosure *obj ); // from Printer.c
3588 top_frame = tso->su;
3591 bottom = &(tso->stack[tso->stack_size]);
3594 /* There must be at least one frame, namely the STOP_FRAME.
3596 ASSERT((P_)frame < bottom);
3598 /* Walk down the stack, reversing the links between frames so that
3599 * we can walk back up as we squeeze from the bottom. Note that
3600 * next_frame and prev_frame refer to next and previous as they were
3601 * added to the stack, rather than the way we see them in this
3602 * walk. (It makes the next loop less confusing.)
3604 * Stop if we find an update frame pointing to a black hole
3605 * (see comment in threadLazyBlackHole()).
3606 */
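/* The core of the link-reversal walk performed below, with the sanity
 * checking stripped out (illustrative; variables as declared above):
 */
#if 0
    next_frame = NULL;
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
        prev_frame  = frame->link;    // remember the frame below (older)
        frame->link = next_frame;     // reverse the link: point back up
        next_frame  = frame;
        frame       = prev_frame;     // step down the stack
    }
#endif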
3609 // bottom - sizeofW(StgStopFrame) is the STOP_FRAME
3610 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3611 prev_frame = frame->link;
3612 frame->link = next_frame;
3617 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3618 printObj((StgClosure *)prev_frame);
3619 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3622 switch (get_itbl(frame)->type) {
3625 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3638 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3640 printObj((StgClosure *)prev_frame);
3643 if (get_itbl(frame)->type == UPDATE_FRAME
3644 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3649 /* Now, we're at the bottom. Frame points to the lowest update
3650 * frame on the stack, and its link actually points to the frame
3651 * above. We have to walk back up the stack, squeezing out empty
3652 * update frames and turning the pointers back around on the way
3655 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3656 * we never want to eliminate it anyway. Just walk one step up
3657 * before starting to squeeze. When you get to the topmost frame,
3658 * remember that there are still some words above it that might have
3659 * to be moved.
3660 */
3665 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3668 * Loop through all of the frames (everything except the very
3669 * bottom). Things are complicated by the fact that we have
3670 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3671 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3673 while (frame != NULL) {
3675 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3676 rtsBool is_update_frame;
3678 next_frame = frame->link;
3679 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3681 /* Check to see if:
3682 * 1. both the previous and current frame are update frames
3683 * 2. the current frame is empty
3684 */
3685 if (prev_was_update_frame && is_update_frame &&
3686 (P_)prev_frame == frame_bottom + displacement) {
3688 // Now squeeze out the current frame
3689 StgClosure *updatee_keep = prev_frame->updatee;
3690 StgClosure *updatee_bypass = frame->updatee;
3693 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3697 /* Deal with blocking queues. If both updatees have blocked
3698 * threads, then we should merge the queues into the update
3699 * frame that we're keeping.
3701 * Alternatively, we could just wake them up: they'll just go
3702 * straight to sleep on the proper blackhole! This is less code
3703 * and probably less bug prone, although it's probably much
3704 * slower than it needs to be.
3705 */
3706 #if 0 // do it properly...
3707 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3708 # error Unimplemented lazy BH warning. (KSW 1999-01)
3710 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3711 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3713 // Sigh. It has one. Don't lose those threads!
3714 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3715 // Urgh. Two queues. Merge them.
3716 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3718 while (keep_tso->link != END_TSO_QUEUE) {
3719 keep_tso = keep_tso->link;
3721 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3724 // For simplicity, just swap the BQ for the BH
3725 P_ temp = updatee_keep;
3727 updatee_keep = updatee_bypass;
3728 updatee_bypass = temp;
3730 // Record the swap in the kept frame (below)
3731 prev_frame->updatee = updatee_keep;
3736 TICK_UPD_SQUEEZED();
3737 /* wasn't there something about update squeezing and ticky to be
3738 * sorted out? oh yes: we aren't counting each enter properly
3739 * in this case. See the log somewhere. KSW 1999-04-21
3741 * Check two things: that the two update frames don't point to
3742 * the same object, and that the updatee_bypass isn't already an
3743 * indirection. Both of these cases only happen when we're in a
3744 * black hole-style loop (and there are multiple update frames
3745 * on the stack pointing to the same closure), but they can both
3746 * screw us up if we don't check.
3748 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3749 // this wakes the threads up
3750 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3753 sp = (P_)frame - 1; // sp = stuff to slide
3754 displacement += sizeofW(StgUpdateFrame);
3757 // No squeeze for this frame
3758 sp = frame_bottom - 1; // Keep the current frame
3760 /* Do lazy black-holing.
3762 if (is_update_frame) {
3763 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3764 if (bh->header.info != &stg_BLACKHOLE_info &&
3765 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3766 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3767 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3768 fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
3771 /* zero out the slop so that the sanity checker can tell
3772 * where the next closure is.
3775 StgInfoTable *info = get_itbl(bh);
3776 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3777 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3778 * info is used for a different purpose, and it's exactly the
3779 * same size as a BLACKHOLE in any case.
3781 if (info->type != THUNK_SELECTOR) {
3782 for (i = np; i < np + nw; i++) {
3783 ((StgClosure *)bh)->payload[i] = 0;
3788 SET_INFO(bh,&stg_BLACKHOLE_info);
3792 // Fix the link in the current frame (should point to the frame below)
3793 frame->link = prev_frame;
3794 prev_was_update_frame = is_update_frame;
3797 // Now slide all words from sp up to the next frame
3799 if (displacement > 0) {
3800 P_ next_frame_bottom;
3802 if (next_frame != NULL)
3803 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3805 next_frame_bottom = tso->sp - 1;
3809 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
3813 while (sp >= next_frame_bottom) {
3814 sp[displacement] = *sp;
3818 prev_frame = (StgUpdateFrame *)((P_)frame + displacement);
3822 tso->sp += displacement;
3823 tso->su = prev_frame;
3826 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames\n",
3827 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3832 /* -----------------------------------------------------------------------------
3833 * Pausing a thread
3834 *
3835 * We have to prepare for GC - this means doing lazy black holing
3836 * here. We also take the opportunity to do stack squeezing if it's
3837 * turned on.
3838 * -------------------------------------------------------------------------- */
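/* Note: stack squeezing is optional; when
 * RtsFlags.GcFlags.squeezeUpdFrames is off we fall back to plain lazy
 * black holing, as the code below shows.
 */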
3840 threadPaused(StgTSO *tso)
3842 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3843 threadSqueezeStack(tso); // does black holing too
3845 threadLazyBlackHole(tso);
3848 /* -----------------------------------------------------------------------------
3849 * Debugging
3850 * -------------------------------------------------------------------------- */
3854 printMutOnceList(generation *gen)
3856 StgMutClosure *p, *next;
3858 p = gen->mut_once_list;
3861 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3862 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3863 fprintf(stderr, "%p (%s), ",
3864 p, info_type((StgClosure *)p));
3866 fputc('\n', stderr);
3870 printMutableList(generation *gen)
3872 StgMutClosure *p, *next;
3877 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3878 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3879 fprintf(stderr, "%p (%s), ",
3880 p, info_type((StgClosure *)p));
3882 fputc('\n', stderr);
3885 static inline rtsBool
3886 maybeLarge(StgClosure *closure)
3888 StgInfoTable *info = get_itbl(closure);
3890 /* closure types that may be found on the new_large_objects list;
3891 see scavenge_large */
3892 return (info->type == MUT_ARR_PTRS ||
3893 info->type == MUT_ARR_PTRS_FROZEN ||
3894 info->type == TSO ||
3895 info->type == ARR_WORDS);