/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.108 2001/07/25 09:14:21 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	       // live static objects
StgClosure* scavenged_static_objects;  // static objects scavenged so far
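
/* The list discipline described above can be pictured in isolation: a
 * closure's static link field doubles as a "have I been listed?" flag,
 * with (StgClosure *)1 rather than NULL marking the end of the list.
 * Below is a minimal, self-contained sketch of just that idea; all the
 * names are hypothetical and it is not part of the RTS.
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

typedef struct SketchClosure_ {
    struct SketchClosure_ *static_link;	/* NULL = not on any list */
    const char *name;
} SketchClosure;

#define SKETCH_END ((SketchClosure *)1)	/* plays the role of END_OF_STATIC_LIST */

static SketchClosure *sketch_static_objects = SKETCH_END;

/* Cons an object onto the front of the list, but only once: a
 * non-NULL link field tells us it is already listed (loop breaking).
 */
static void sketch_add_static(SketchClosure *c)
{
    if (c->static_link == NULL) {
	c->static_link = sketch_static_objects;
	sketch_static_objects = c;
    }
}

int main(void)
{
    SketchClosure a = { NULL, "a" }, b = { NULL, "b" };
    SketchClosure *p;
    sketch_add_static(&a);
    sketch_add_static(&b);
    sketch_add_static(&a);	/* second add is a no-op */
    for (p = sketch_static_objects; p != SKETCH_END; p = p->static_link) {
	printf("%s\n", p->name);	/* prints "b" then "a" */
    }
    return 0;
}
#endif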
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
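
/* The depth counter above implements a simple bounded-recursion guard:
 * recurse through selector thunks only while the counter stays under
 * the limit, otherwise fall back to the non-recursive path.  A minimal
 * sketch of the pattern (names hypothetical, not part of the RTS):
 */
#if 0	/* illustrative sketch only */
static unsigned int sketch_depth = 0;
#define SKETCH_MAX_DEPTH 256

typedef struct SketchNode_ { struct SketchNode_ *next; } SketchNode;

/* Walk a chain of nodes recursively, but give up on the shortcut
 * (returning the node unchanged) once the depth limit is hit.
 */
static SketchNode *sketch_shortcut(SketchNode *n)
{
    if (n->next != NULL && sketch_depth < SKETCH_MAX_DEPTH) {
	SketchNode *r;
	sketch_depth++;
	r = sketch_shortcut(n->next);	/* guarded recursion */
	sketch_depth--;
	return r;
    }
    return n;	/* depth exhausted (or chain end): handle without recursing */
}
#endif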
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgClosure *p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */
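
/* The bitmap machinery can be pictured in isolation: one mark bit per
 * word of heap, indexed by word number within the region.  A minimal,
 * self-contained sketch of the two basic operations (names are
 * hypothetical, not the RTS API):
 */
#if 0	/* illustrative sketch only */
#include <limits.h>

#define SKETCH_WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

/* Set the mark bit for word number 'w' within a region's bitmap. */
static void sketch_mark(unsigned long *bitmap, unsigned long w)
{
    bitmap[w / SKETCH_WORD_BITS] |= 1UL << (w % SKETCH_WORD_BITS);
}

/* Test the mark bit for word number 'w'. */
static int sketch_is_marked(unsigned long *bitmap, unsigned long w)
{
    return (bitmap[w / SKETCH_WORD_BITS] >> (w % SKETCH_WORD_BITS)) & 1;
}
#endif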
#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
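
/* The "scavenge until no more objects can be evacuated" step above is
 * the classic Cheney fixpoint.  Below is a minimal, self-contained
 * sketch of just that control structure, for a single semispace and a
 * toy object format; all names are hypothetical, and the real
 * collector additionally handles generations, steps, large objects and
 * dozens of closure types.
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>
#include <string.h>

#define SKETCH_NFIELDS 2
typedef struct SketchObj_ {
    struct SketchObj_ *fwd;			/* non-NULL => already evacuated */
    struct SketchObj_ *field[SKETCH_NFIELDS];	/* pointers or NULL */
} SketchObj;

static SketchObj sketch_to_space[1024];
static size_t sketch_alloc;			/* allocation index into to-space */

static SketchObj *sketch_evacuate(SketchObj *p)
{
    SketchObj *dest;
    if (p == NULL) return NULL;
    if (p->fwd != NULL) return p->fwd;		/* already copied: follow fwd ptr */
    dest = &sketch_to_space[sketch_alloc++];
    memcpy(dest, p, sizeof(SketchObj));
    dest->fwd = NULL;
    p->fwd = dest;				/* leave forwarding pointer behind */
    return dest;
}

static void sketch_gc(SketchObj **roots, size_t nroots)
{
    size_t scan = 0, i;
    int f;
    sketch_alloc = 0;
    for (i = 0; i < nroots; i++)
	roots[i] = sketch_evacuate(roots[i]);
    /* Scavenge to-space breadth-first: loop until the scan pointer
     * catches up with the allocation pointer, i.e. until no more
     * objects can be evacuated.
     */
    while (scan < sketch_alloc) {
	SketchObj *o = &sketch_to_space[scan++];
	for (f = 0; f < SKETCH_NFIELDS; f++)
	    o->field[f] = sketch_evacuate(o->field[f]);
    }
}
#endif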
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC 
  stat_startGC();

  // Init stats and print par specific (timing) info 
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC 
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats? 
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads) 
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space 
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block 
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet 
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size; // in bytes
	bdescr *bitmap_bdescr;
	StgWord *bitmap;

	bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
			       bitmap_size, bitmap););

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // for each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	  }
	}
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	ASSERT(stp->blocks == NULL);
	bd = allocBlock();
	bd->gen_no = g;
	bd->step = stp;
	bd->link = NULL;
	bd->flags = 0;		// *not* a to-space block or a large object
	stp->hp = bd->start;
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
	stp->hp_bd = bd;
	stp->blocks = bd;
	stp->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first 
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(GRAN)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */
#endif

#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system 
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches 
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects 
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
	scavenge_mark_stack();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen 
    {
      int gen, st;
    loop2:
      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }

    if (flag) { goto loop; }

    // must be last... 
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something 
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM 
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains 
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up 
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats 
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied 
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected... 
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space 
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space 
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large_objects, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *   max_blocks =  oldgen_max_blocks * G
	 *                 ---------------------
	 *                     oldest_gen_no
	 *
	 * where G is this generation's number and oldest_gen_no is the
	 * number of the oldest generation, RtsFlags.GcFlags.generations-1.
	 */
	if (g != 0) {
	  if (RtsFlags.GcFlags.generations > 1) {
	    generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	  } else {
	    generations[g].max_blocks = oldest_gen->max_blocks;
	  }
	}
      } else {
	// for older generations... 

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC 
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      oldest_gen->max_blocks = 
	stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks - 
	     (int)oldest_gen->steps[0].n_blocks) < 
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space 
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 > 
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose 
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */

    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC 

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       *
       * For example, with a suggestion of 256 blocks, needed = 64 and
       * g0s0_pcnt_kept = 30, we get (256-64)*100/130 = 147 blocks.
       */
      blocks = 
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);

      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  // mark the garbage collected CAFs as dead 
#if 0 && defined(DEBUG) // doesn't work at the moment 
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list 
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers 
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die 
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC 
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info 
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling 
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre 
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on 
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened. 
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
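
/* The liveness rule implemented below can be pictured in isolation: a
 * weak pointer keeps its value alive only while its key is alive, and
 * values may themselves contain other keys, so liveness must be
 * computed as a fixpoint.  A minimal, self-contained sketch (all names
 * hypothetical, not part of the RTS):
 */
#if 0	/* illustrative sketch only */
#include <stddef.h>

typedef struct { int key; int value; } SketchWeak;	/* indices into alive[] */

/* alive[i] records whether object i is currently known reachable; weak
 * i's value is kept alive only while its key is.  Iterate until nothing
 * changes: a newly live value may contain another weak's key, which is
 * exactly why traverse_weak_ptr_list is re-run from the main GC loop
 * until it reports no work done.
 */
static void sketch_weak_fixpoint(int *alive, SketchWeak *ws, size_t n)
{
    int changed = 1;
    size_t i;
    while (changed) {
	changed = 0;
	for (i = 0; i < n; i++) {
	    if (alive[ws[i].key] && !alive[ws[i].value]) {
		alive[ws[i].value] = 1;	/* live key keeps value alive */
		changed = 1;
	    }
	}
    }
    /* Any weak whose key is still dead at this point is itself dead,
     * and its finalizer can be scheduled.
     */
}
#endif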
static rtsBool 
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer 
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list 
      *last_w = w->link;
      // and put it on the new weak ptr list 
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default:
	  ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads  = t;
	  *prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting. 
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
	if (bd->flags & BF_EVACUATED) {
	    return p;
	} else {
	    return NULL;
	}
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd 
    case IND_OLDGEN_PERM:
	// follow indirections 
	p = ((StgInd *)p)->indirectee;
	continue;

    case EVACUATED:
	// alive! 
	return ((StgEvacuated *)p)->evacuee;

    case TSO:
	if (((StgTSO *)p)->what_next == ThreadRelocated) {
	    p = (StgClosure *)((StgTSO *)p)->link;
	    continue;
	}

    default:
	// dead. 
	return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

// chain a new block onto a step's to-space, retiring the current hp block
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  new_blocks++;
}
static __inline__ void 
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in 
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block 
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated? 
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list 
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list 
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION    
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen 
           if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
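
/* The decision table above, written out as a self-contained function
 * (names hypothetical; the real code folds these tests into evacuate()
 * and copy()):
 */
#if 0	/* illustrative sketch only */
typedef enum { DO_NOTHING, EVAC_TO_NEXT_STEP, EVAC_TO_EVAC_GEN } SketchAction;

/* m: generation the object lives in; n: oldest generation being
 * collected; eg: the desired destination generation (evac_gen).
 */
static SketchAction sketch_evac_action(unsigned m, unsigned n, unsigned eg)
{
    if (m >= eg) {
	if (m > n) return DO_NOTHING;	/* not being collected */
	return EVAC_TO_NEXT_STEP;	/* normal promotion path */
    }
    return EVAC_TO_EVAC_GEN;		/* must reach evac_gen, step 0 */
}
#endif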
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope 
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO && 
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		barf("ToDo: mark stack full");
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong 
#endif

  // make sure the info pointer is into text space 
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info->type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&  
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ... 
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		// here because of MIN_UPD_SIZE 
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 && 
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp); 
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range 
	  ASSERT(offset < 
		 (StgWord32)(selectee_info->layout.payload.ptrs + 
			     selectee_info->layout.payload.nptrs));

	  // perform the selection! 
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if ((bd->flags & BF_EVACUATED) == 0) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
	  // otherwise, fall through... 
#endif

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	// not evaluated yet 
	break;

#if defined(PAR)
	// a copy of the top-level cases below 
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp); 
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them 
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc && 
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc && 
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these 
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't 
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation 
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block 
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block 
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ 
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp); 
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers... 
    diff = (StgPtr)dest - (StgPtr)src; // In *words* 
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases! 

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done! 
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue 
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions = 
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack 
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects 
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block 
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
    }

    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE 
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE 
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
    case FUN_0_2:
	scavenge_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
    case FUN_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through 

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case IND_PERM:
	if (stp->gen_no != 0) {
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
	}
	// fall through 
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee = 
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these 
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
    {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue = 
	    evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
    }

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    case AP_UPD: // same as PAPs 
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
    }

    case ARR_WORDS:
	// nothing to follow 
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything 
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable 
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
	// follow everything 
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
    }

    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ 
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue = 
	    evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW(); 
	break;
    }

    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded 
	(StgClosure *)bf->node = 
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue 
	(StgClosure *)bf->link = 
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf), 
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }

    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case 

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ 
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue = 
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p", 
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, next;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        info = get_itbl((StgClosure *)p);
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

        switch (info->type) {

        case MVAR:
            /* treat MVars specially, because we don't want to evacuate the
             * mut_link field in the middle of the closure.
             */
        {
            StgMVar *mvar = ((StgMVar *)p);
            evac_gen = 0;
            (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
            (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
            (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse; // mutable.
            break;
        }

            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            mark(p+1,Bdescr(p));
            break;

            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            mark(p+1,Bdescr(p));
            break;

            mark(p+1,Bdescr(p));
            break;

        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;
        }

            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN_PERM:
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);
            if (failed_to_evac) {
                recordOldToNewPtrs((StgMutClosure *)p);
            }
            failed_to_evac = rtsFalse;
            mark(p+1,Bdescr(p));
            break;

        case MUT_VAR:
            evac_gen = 0;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            mark(p+1,Bdescr(p));
            break;

            failed_to_evac = rtsFalse;
            break;

        case SE_CAF_BLACKHOLE:
            break;

        case BLACKHOLE_BQ:
        {
            StgBlockingQueue *bh = (StgBlockingQueue *)p;
            (StgClosure *)bh->blocking_queue =
                evacuate((StgClosure *)bh->blocking_queue);
            failed_to_evac = rtsFalse;
            break;
        }

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            break;
        }

        case AP_UPD: // same as PAPs
        case PAP:
            /* Treat a PAP just like a section of stack, not forgetting to
             * evacuate the function pointer too...
             */
        {
            StgPAP* pap = (StgPAP *)p;

            pap->fun = evacuate(pap->fun);
            scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
            break;
        }

        case MUT_ARR_PTRS:
            // follow everything
            evac_gen = 0;               // repeatedly mutable
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;  // mutable anyhow.
            break;

        case MUT_ARR_PTRS_FROZEN:
            // follow everything
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            evac_gen = 0;
            scavengeTSO(tso);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;
        }

#if defined(PAR)
        case RBH: // cf. BLACKHOLE_BQ
        {
            nat size, ptrs, nonptrs, vhs;
            char str[80];
            StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

            StgRBH *rbh = (StgRBH *)p;
            (StgClosure *)rbh->blocking_queue =
                evacuate((StgClosure *)rbh->blocking_queue);
            recordMutable((StgMutClosure *)rbh);
            failed_to_evac = rtsFalse;  // mutable anyhow.
            IF_PAR_DEBUG(gc,
                         belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                               p, info_type(p), (StgClosure *)rbh->blocking_queue));
            break;
        }

        case BLOCKED_FETCH:
        {
            StgBlockedFetch *bf = (StgBlockedFetch *)p;
            // follow the pointer to the node which is being demanded
            (StgClosure *)bf->node =
                evacuate((StgClosure *)bf->node);
            // follow the link to the rest of the blocking queue
            (StgClosure *)bf->link =
                evacuate((StgClosure *)bf->link);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)bf);
            }
            IF_PAR_DEBUG(gc,
                         belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                               bf, info_type((StgClosure *)bf),
                               bf->node, info_type(bf->node)));
            break;
        }

        case FETCH_ME:
            break; // nothing to do in this case

        case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
        {
            StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
            (StgClosure *)fmbq->blocking_queue =
                evacuate((StgClosure *)fmbq->blocking_queue);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)fmbq);
            }
            IF_PAR_DEBUG(gc,
                         belch("@@ scavenge: %p (%s) exciting, isn't it",
                               p, info_type((StgClosure *)p)));
            break;
        }
#endif // PAR

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            mkMutCons((StgClosure *)p, &generations[evac_gen]);
        }
    } // while (!mark_stack_empty())
}
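/* Roughly speaking (sketch of the mechanism, not a precise claim):
 * objects land on the mark stack via evacuate() when their destination
 * is a compacted generation -- compacted blocks have no to-space scan
 * pointer to walk linearly, so evacuate() pushes each such object and
 * the loop above pops and scavenges them one at a time.
 */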
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
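/* Example: a constructor in generation 1 whose payload still points
 * into generation 0.  A MUT_CONS on generation 1's mut_once_list keeps
 * hold of it, and each GC we re-scavenge just that one closure with
 * scavenge_one() until its payload has been promoted, at which point
 * the MUT_CONS can be dropped (see scavenge_mut_once_list() below).
 */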
static rtsBool
scavenge_one(StgClosure *p)
{
    const StgInfoTable *info;
    StgPtr q, end;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    switch (info -> type) {

    case FUN_1_0:       // hardly worth specialising these guys

    case IND_OLDGEN_PERM:
    {
        end = (P_)p->payload + info->layout.payload.ptrs;
        for (q = (P_)p->payload; q < end; q++) {
            (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
    }

    case SE_CAF_BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        break;
    }

    case AP_UPD: /* same as PAPs */
    case PAP:
        /* Treat a PAP just like a section of stack, not forgetting to
         * evacuate the function pointer too...
         */
    {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        break;
    }

    case IND_OLDGEN:
        /* This might happen if for instance a MUT_CONS was pointing to a
         * THUNK which has since been updated.  The IND_OLDGEN will
         * be on the mutable list anyway, so we don't need to do anything
         * here.
         */
        break;

    case MUT_ARR_PTRS_FROZEN:
    {
        // follow everything
        StgPtr next;

        q = (P_)p;
        next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
            (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
    }

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return no_luck;
}
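/* Usage note: the result is rtsTrue if some pointer could *not* be
 * promoted to the desired generation, i.e. the object still carries an
 * old-to-new pointer and must stay on a mutable list; see the MUT_CONS
 * case in scavenge_mut_once_list() below.
 */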
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
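/* As a rough sketch of how these two functions are driven (the actual
 * loop lives in GarbageCollect(), earlier in this file; details may
 * differ slightly):
 *
 *     for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *         scavenge_mut_once_list(&generations[g]);
 *         scavenge_mutable_list(&generations[g]);
 *     }
 */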
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

        // make sure the info pointer is into text space
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        switch(info->type) {

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
        case IND_STATIC:
            /* Try to pull the indirectee into this generation, so we can
             * remove the indirection from the mutable list.
             */
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);

#if 0 && defined(DEBUG)
            if (RtsFlags.DebugFlags.gc)
            /* Debugging code to print out the size of the thing we just
             * promoted
             */
            {
                StgPtr start = gen->steps[0].scan;
                bdescr *start_bd = gen->steps[0].scan_bd;
                nat size = 0;
                scavenge(&gen->steps[0]);
                if (start_bd != gen->steps[0].scan_bd) {
                    size += (P_)BLOCK_ROUND_UP(start) - start;
                    start_bd = start_bd->link;
                    while (start_bd != gen->steps[0].scan_bd) {
                        size += BLOCK_SIZE_W;
                        start_bd = start_bd->link;
                    }
                    size += gen->steps[0].scan -
                        (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
                } else {
                    size = gen->steps[0].scan - start;
                }
                fprintf(stderr,"evac IND_OLDGEN: %ld bytes\n", size * sizeof(W_));
            }
#endif

            /* failed_to_evac might happen if we've got more than two
             * generations, we're collecting only generation 0, the
             * indirection resides in generation 2 and the indirectee is
             * in generation 1.
             */
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                p->mut_link = new_list;
                new_list = p;
            } else {
                /* the mut_link field of an IND_STATIC is overloaded as the
                 * static link field too (it just so happens that we don't need
                 * both at the same time), so we need to NULL it out when
                 * removing this object from the mutable list because the static
                 * link fields are all assumed to be NULL before doing a major
                 * GC.
                 */
                p->mut_link = NULL;
            }
            continue;

        case MUT_CONS:
            /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
             * it from the mutable list if possible by promoting whatever it
             * points to.
             */
            if (scavenge_one((StgClosure *)((StgMutVar *)p)->var)) {
                /* didn't manage to promote everything, so put the
                 * MUT_CONS back on the list.
                 */
                p->mut_link = new_list;
                new_list = p;
            }
            continue;

        default:
            // shouldn't have anything else on the mutables list
            barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
        }
    }

    gen->mut_once_list = new_list;
}
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

        // make sure the info pointer is into text space
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        switch(info->type) {

        case MUT_ARR_PTRS:
            // follow everything
            p->mut_link = gen->mut_list;
            gen->mut_list = p;
        {
            StgPtr end, q;

            end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
                (StgClosure *)*q = evacuate((StgClosure *)*q);
            }
            continue;
        }

        // Happens if a MUT_ARR_PTRS in the old generation is frozen
        case MUT_ARR_PTRS_FROZEN:
        {
            StgPtr end, q;

            end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
                (StgClosure *)*q = evacuate((StgClosure *)*q);
            }

            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                mkMutCons((StgClosure *)p, gen);
            }
            continue;
        }

        case MUT_VAR:
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            p->mut_link = gen->mut_list;
            gen->mut_list = p;
            continue;

        case MVAR:
        {
            StgMVar *mvar = (StgMVar *)p;
            (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
            (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
            (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
            p->mut_link = gen->mut_list;
            gen->mut_list = p;
            continue;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;

            scavengeTSO(tso);

            /* Don't take this TSO off the mutable list - it might still
             * point to some younger objects (because we set evac_gen to 0
             * above).
             */
            tso->mut_link = gen->mut_list;
            gen->mut_list = (StgMutClosure *)tso;
            continue;
        }

        case BLACKHOLE_BQ:
        {
            StgBlockingQueue *bh = (StgBlockingQueue *)p;
            (StgClosure *)bh->blocking_queue =
                evacuate((StgClosure *)bh->blocking_queue);
            p->mut_link = gen->mut_list;
            gen->mut_list = p;
            continue;
        }

        /* Happens if a BLACKHOLE_BQ in the old generation is updated:
         */
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            /* Try to pull the indirectee into this generation, so we can
             * remove the indirection from the mutable list.
             */
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);

            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                p->mut_link = gen->mut_once_list;
                gen->mut_once_list = p;
            } else {
                p->mut_link = NULL;
            }
            continue;

#if defined(PAR)
        // HWL: check whether all of these are necessary

        case RBH: // cf. BLACKHOLE_BQ
        {
            // nat size, ptrs, nonptrs, vhs;
            // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
            StgRBH *rbh = (StgRBH *)p;
            (StgClosure *)rbh->blocking_queue =
                evacuate((StgClosure *)rbh->blocking_queue);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)rbh);
            }
            // ToDo: use size of reverted closure here!
            p += BLACKHOLE_sizeW();
            break;
        }

        case BLOCKED_FETCH:
        {
            StgBlockedFetch *bf = (StgBlockedFetch *)p;
            // follow the pointer to the node which is being demanded
            (StgClosure *)bf->node =
                evacuate((StgClosure *)bf->node);
            // follow the link to the rest of the blocking queue
            (StgClosure *)bf->link =
                evacuate((StgClosure *)bf->link);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)bf);
            }
            p += sizeofW(StgBlockedFetch);
            break;
        }

#ifdef DIST
        case REMOTE_REF:
            barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
#endif
        case FETCH_ME:
            p += sizeofW(StgFetchMe);
            break; // nothing to do in this case

        case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
        {
            StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
            (StgClosure *)fmbq->blocking_queue =
                evacuate((StgClosure *)fmbq->blocking_queue);
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                recordMutable((StgMutClosure *)fmbq);
            }
            p += sizeofW(StgFetchMeBlockingQueue);
            break;
        }
#endif // PAR

        default:
            // shouldn't have anything else on the mutables list
            barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
        }
    }
}
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects.
     */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
     * list...
     */
    while (p != END_OF_STATIC_LIST) {

        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        static_objects = STATIC_LINK(info,p);
        STATIC_LINK(info,p) = scavenged_static_objects;
        scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            ind->indirectee = evacuate(ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list (and take it off the
             * scavenged_static list because the static link and mut link
             * pointers are one and the same).
             */
            if (failed_to_evac) {
                failed_to_evac = rtsFalse;
                scavenged_static_objects = STATIC_LINK(info,p);
                ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
                oldest_gen->mut_once_list = (StgMutClosure *)ind;
            }
            break;
        }

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                (StgClosure *)*q = evacuate((StgClosure *)*q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }
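        /* Only an IND_STATIC can point into the dynamic heap (and that
         * case re-queues itself and clears the flag above); the payload
         * of a static constructor can only refer to other static
         * closures, whose evacuation never fails.  Hence:
         */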
        ASSERT(failed_to_evac == rtsFalse);

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list now that we've done some evacuating!
         * (static_objects is a global)
         */
        p = static_objects;
    }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
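/* Bitmap convention (worked example): in the bitmap loops below a bit
 * of 0 means "this stack word is a pointer, evacuate it" and a bit of
 * 1 means "non-pointer, skip".  So a small bitmap of binary 101 over
 * three stack words evacuates only the middle word; the other two are
 * left alone.
 */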
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */
    while (p < stack_end) {
        q = *(P_ *)p;

        // If we've got a tag, skip over that many words on the stack
        if (IS_ARG_TAG((W_)q)) {
            p += ARG_SIZE(q);
            p++;
            continue;
        }

        /* Is q a pointer to a closure?
         */
        if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
            if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
                ASSERT(closure_STATIC((StgClosure *)q));
            }
            // otherwise, must be a pointer into the allocation space.
#endif

            (StgClosure *)*p = evacuate((StgClosure *)q);
            p++;
            continue;
        }

        /*
         * Otherwise, q must be the info pointer of an activation
         * record.  All activation records have 'bitmap' style layout
         * info.
         */
        info = get_itbl((StgClosure *)p);

        switch (info->type) {

            // Dynamic bitmap: the mask is stored on the stack
        case RET_DYN:
            bitmap = ((StgRetDyn *)p)->liveness;
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            goto small_bitmap;

            // probably a slow-entry point return address:
        case FUN:
        case FUN_STATIC:
        {
            StgPtr old_p = p;
            IF_DEBUG(sanity,
                     belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                           old_p, p, old_p+1));
            p++; // what if FHS!=1 !? -- HWL
            continue;
        }

        case UPDATE_FRAME:
            /* Specialised code for update frames, since they're so common.
             * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
             * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
             */
        {
            StgUpdateFrame *frame = (StgUpdateFrame *)p;

            p += sizeofW(StgUpdateFrame);

#if 1
            frame->updatee = evacuate(frame->updatee);
            continue;
#else // specialised code for update frames, not sure if it's worth it.
            StgClosure *to;
            nat type = get_itbl(frame->updatee)->type;

            if (type == EVACUATED) {
                frame->updatee = evacuate(frame->updatee);
                continue;
            } else {
                bdescr *bd = Bdescr((P_)frame->updatee);
                step *stp;
                if (bd->gen_no > N) {
                    if (bd->gen_no < evac_gen) {
                        failed_to_evac = rtsTrue;
                    }
                    continue;
                }

                // Don't promote blackholes
                stp = bd->step;
                if (!(stp->gen_no == 0 &&
                      stp->no != 0 &&
                      stp->no == stp->gen->n_steps-1)) {
                    stp = stp->to;
                }

                switch (type) {
                case BLACKHOLE:
                case CAF_BLACKHOLE:
                    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                                  sizeofW(StgHeader), stp);
                    frame->updatee = to;
                    continue;
                case BLACKHOLE_BQ:
                    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
                    frame->updatee = to;
                    recordMutable((StgMutClosure *)to);
                    continue;
                default:
                    /* will never be SE_{,CAF_}BLACKHOLE, since we
                       don't push an update frame for single-entry thunks.  KSW 1999-01. */
                    barf("scavenge_stack: UPDATE_FRAME updatee");
                }
            }
#endif
        }

            // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case STOP_FRAME:
        case CATCH_FRAME:
        case SEQ_FRAME:
        case RET_BCO:
        case RET_SMALL:
        case RET_VEC_SMALL:
            bitmap = info->layout.bitmap;
            p++;
            // this assumes that the payload starts immediately after the info-ptr
        small_bitmap:
            while (bitmap != 0) {
                if ((bitmap & 1) == 0) {
                    (StgClosure *)*p = evacuate((StgClosure *)*p);
                }
                p++;
                bitmap = bitmap >> 1;
            }

        follow_srt:
            scavenge_srt(info);
            continue;

            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        case RET_VEC_BIG:
        {
            StgLargeBitmap *large_bitmap;
            nat i;

            large_bitmap = info->layout.large_bitmap;
            p++;

            for (i=0; i<large_bitmap->size; i++) {
                bitmap = large_bitmap->bitmap[i];
                q = p + BITS_IN(W_);
                while (bitmap != 0) {
                    if ((bitmap & 1) == 0) {
                        (StgClosure *)*p = evacuate((StgClosure *)*p);
                    }
                    p++;
                    bitmap = bitmap >> 1;
                }
                if (i+1 < large_bitmap->size) {
                    while (p < q) {
                        (StgClosure *)*p = evacuate((StgClosure *)*p);
                        p++;
                    }
                }
            }

            // and don't forget to follow the SRT
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
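/* Note that large objects are never copied: they are moved between
 * lists by relinking their block descriptors.  dbl_link_onto() pushes a
 * block descriptor onto the front of a doubly-linked list of blocks;
 * as a sketch of the idea (field names are assumptions here -- the
 * real definition lives with the other list utilities):
 *
 *     bd->link = *list;
 *     bd->back = NULL;
 *     if (*list) (*list)->back = bd;
 *     *list = bd;
 */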
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p, q, next;
    const StgInfoTable* info;
    nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

        /* take this object *off* the large objects list and put it on
         * the scavenged large objects list.  This is so that we can
         * treat new_large_objects as a stack and push new objects on
         * the front when evacuating.
         */
        stp->new_large_objects = bd->link;
        dbl_link_onto(bd, &stp->scavenged_large_objects);

        // update the block count in this step.
        stp->n_scavenged_large_blocks += bd->blocks;

        p = bd->start;
        q = p;
        info = get_itbl((StgClosure *)p);

        // only certain objects can be "large"...
        switch (info->type) {

        case ARR_WORDS:
            // nothing to follow
            break;

        case MUT_ARR_PTRS:
            // follow everything
            evac_gen = 0;       // repeatedly mutable
            recordMutable((StgMutClosure *)p);
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;

        case MUT_ARR_PTRS_FROZEN:
            // follow everything
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;

            evac_gen = 0;       // repeatedly mutable
            scavengeTSO(tso);
            recordMutable((StgMutClosure *)tso);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;
        }

        case AP_UPD:
        case PAP:
        {
            StgPAP* pap = (StgPAP *)p;
            pap->fun = evacuate(pap->fun);
            scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
            break;
        }

        default:
            barf("scavenge_large: unknown/strange object %d", (int)(info->type));
        }

        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            mkMutCons((StgClosure *)q, &generations[evac_gen]);
        }
    }
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */
static void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
        info = get_itbl(p);
        link = STATIC_LINK(info, p);
        STATIC_LINK(info,p) = NULL;
    }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
        next = c->mut_link;
        c->mut_link = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        c->header.info = c->saved_info;
        c->saved_info = NULL;
        // could, but not necessary: c->static_link = NULL;
    }
    caf_list = NULL;
}

void
scavengeCAFs( void )
{
    StgIndStatic *c;

    evac_gen = 0;
    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
    {
        c->indirectee = evacuate(c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)
static void
gcCAFs(void)
{
    StgClosure*  p;
    StgClosure** pp;
    const StgInfoTable *info;
    nat i;

    i = 0;
    p = caf_list;
    pp = &caf_list;

    while (p != NULL) {
        info = get_itbl(p);

        ASSERT(info->type == IND_STATIC);

        if (STATIC_LINK(info,p) == NULL) {
            IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04lx\n", (long)p));
            // black hole it
            SET_INFO(p,&stg_BLACKHOLE_info);
            p = STATIC_LINK2(info,p);
            *pp = p;
        }
        else {
            pp = &STATIC_LINK2(info,p);
            p = *pp;
            i++;
        }
    }

    //  fprintf(stderr, "%d CAFs live\n", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
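/* Picture: tso->su points at the innermost update frame, and each
 * frame's link field points at the next enclosing frame, ending at the
 * STOP_FRAME at the bottom of the stack:
 *
 *     tso->su --> [UPDATE_FRAME] --> [CATCH_FRAME] --> ... --> [STOP_FRAME]
 *
 * threadLazyBlackHole() below walks this chain, blackholing each
 * updatee; it can stop at the first updatee that is already a
 * BLACKHOLE, because everything below it was blackholed on an earlier
 * pause.
 */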
static void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
        switch (get_itbl(update_frame)->type) {

        case CATCH_FRAME:
            update_frame = ((StgCatchFrame *)update_frame)->link;
            break;

        case UPDATE_FRAME:
            bh = (StgBlockingQueue *)update_frame->updatee;

            /* if the thunk is already blackholed, it means we've also
             * already blackholed the rest of the thunks on this stack,
             * so we can stop early.
             *
             * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
             * don't interfere with this optimisation.
             */
            if (bh->header.info == &stg_BLACKHOLE_info) {
                return;
            }

            if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
                bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
                fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
                SET_INFO(bh,&stg_BLACKHOLE_info);
            }

            update_frame = update_frame->link;
            break;

        case SEQ_FRAME:
            update_frame = ((StgSeqFrame *)update_frame)->link;
            break;

        case STOP_FRAME:
            return;

        default:
            barf("threadPaused");
        }
    }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
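/* Illustration: when two update frames sit directly on top of one
 * another, both will be updated with the same value, so one updatee can
 * be overwritten now with an indirection to the other (UPD_IND_NOLOCK()
 * below) and its frame slid out of the stack:
 *
 *     ... | upd u2 | upd u1 | ...   ==>   ... | upd u1 | ...
 *                                         (u2 = IND -> u1)
 */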
static void
threadSqueezeStack(StgTSO *tso)
{
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame;         // Temporally next
    StgUpdateFrame *prev_frame;         // Temporally previous
    StgPtr bottom;
    rtsBool prev_was_update_frame;
#if DEBUG
    StgUpdateFrame *top_frame;
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
        bhs=0, squeezes=0;
    void printObj( StgClosure *obj ); // from Printer.c

    top_frame = tso->su;
#endif

    frame  = tso->su;
    bottom = &(tso->stack[tso->stack_size]);

    /* There must be at least one frame, namely the STOP_FRAME.
     */
    ASSERT((P_)frame < bottom);

    /* Walk down the stack, reversing the links between frames so that
     * we can walk back up as we squeeze from the bottom.  Note that
     * next_frame and prev_frame refer to next and previous as they were
     * added to the stack, rather than the way we see them in this
     * walk. (It makes the next loop less confusing.)
     *
     * Stop if we find an update frame pointing to a black hole
     * (see comment in threadLazyBlackHole()).
     */
    next_frame = NULL;

    // bottom - sizeof(StgStopFrame) is the STOP_FRAME
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
        prev_frame = frame->link;
        frame->link = next_frame;
        next_frame = frame;
        frame = prev_frame;
#if DEBUG
        IF_DEBUG(sanity,
                 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
                     printObj((StgClosure *)prev_frame);
                     barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
                          frame, prev_frame);
                 })
        switch (get_itbl(frame)->type) {
        case UPDATE_FRAME:
            upd_frames++;
            if (frame->updatee->header.info == &stg_BLACKHOLE_info)
                bhs++;
            break;
        case STOP_FRAME:
            stop_frames++;
            break;
        case CATCH_FRAME:
            catch_frames++;
            break;
        case SEQ_FRAME:
            seq_frames++;
            break;
        default:
            barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
                 frame, prev_frame);
            printObj((StgClosure *)prev_frame);
        }
#endif
        if (get_itbl(frame)->type == UPDATE_FRAME
            && frame->updatee->header.info == &stg_BLACKHOLE_info) {
            break;
        }
    }

    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above. We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     * back up.
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze. When you get to the topmost frame,
     * remember that there are still some words above it that might
     * have to be moved.
     */
    prev_frame = frame;
    frame = next_frame;

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
     */
    while (frame != NULL) {
        StgPtr sp;
        StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
        rtsBool is_update_frame;

        next_frame = frame->link;
        is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

        /* Check to see if
         *   1. both the previous and current frame are update frames
         *   2. the current frame is empty
         */
        if (prev_was_update_frame && is_update_frame &&
            (P_)prev_frame == frame_bottom + displacement) {

            // Now squeeze out the current frame
            StgClosure *updatee_keep   = prev_frame->updatee;
            StgClosure *updatee_bypass = frame->updatee;

            IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
#if DEBUG
            squeezes++;
#endif

            /* Deal with blocking queues.  If both updatees have blocked
             * threads, then we should merge the queues into the update
             * frame that we're keeping.
             *
             * Alternatively, we could just wake them up: they'll just go
             * straight to sleep on the proper blackhole!  This is less code
             * and probably less bug prone, although it's probably much
             * slower --SDM
             */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
            if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
                || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
                ) {
                // Sigh.  It has one.  Don't lose those threads!
                if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
                    // Urgh.  Two queues.  Merge them.
                    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

                    while (keep_tso->link != END_TSO_QUEUE) {
                        keep_tso = keep_tso->link;
                    }
                    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
                } else {
                    // For simplicity, just swap the BQ for the BH
                    P_ temp = updatee_keep;

                    updatee_keep = updatee_bypass;
                    updatee_bypass = temp;

                    // Record the swap in the kept frame (below)
                    prev_frame->updatee = updatee_keep;
                }
            }
#endif

            TICK_UPD_SQUEEZED();
            /* wasn't there something about update squeezing and ticky to be
             * sorted out?  oh yes: we aren't counting each enter properly
             * in this case.  See the log somewhere.  KSW 1999-04-21
             *
             * Check two things: that the two update frames don't point to
             * the same object, and that the updatee_bypass isn't already an
             * indirection.  Both of these cases only happen when we're in a
             * black hole-style loop (and there are multiple update frames
             * on the stack pointing to the same closure), but they can both
             * screw us up if we don't check.
             */
            if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
                // this wakes the threads up
                UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
            }

            sp = (P_)frame - 1; // sp = stuff to slide
            displacement += sizeofW(StgUpdateFrame);

        } else {
            // No squeeze for this frame
            sp = frame_bottom - 1; // Keep the current frame

            /* Do lazy black-holing.
             */
            if (is_update_frame) {
                StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
                if (bh->header.info != &stg_BLACKHOLE_info &&
                    bh->header.info != &stg_BLACKHOLE_BQ_info &&
                    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
                    fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
                    /* zero out the slop so that the sanity checker can tell
                     * where the next closure is.
                     */
                    {
                        StgInfoTable *info = get_itbl(bh);
                        nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
                        /* don't zero out slop for a THUNK_SELECTOR, because its layout
                         * info is used for a different purpose, and it's exactly the
                         * same size as a BLACKHOLE in any case.
                         */
                        if (info->type != THUNK_SELECTOR) {
                            for (i = np; i < np + nw; i++) {
                                ((StgClosure *)bh)->payload[i] = 0;
                            }
                        }
                    }
#endif
                    SET_INFO(bh,&stg_BLACKHOLE_info);
                }
            }

            // Fix the link in the current frame (should point to the frame below)
            frame->link = prev_frame;
            prev_was_update_frame = is_update_frame;
        }

        // Now slide all words from sp up to the next frame
        if (displacement > 0) {
            P_ next_frame_bottom;

            if (next_frame != NULL)
                next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
            else
                next_frame_bottom = tso->sp - 1;

            IF_DEBUG(gc,
                     fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                             displacement));

            while (sp >= next_frame_bottom) {
                sp[displacement] = *sp;
                sp -= 1;
            }
        }
        (P_)prev_frame = (P_)frame + displacement;
        frame = next_frame;
    }

    tso->sp += displacement;
    tso->su = prev_frame;
    IF_DEBUG(gc,
             fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                     squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
        threadSqueezeStack(tso);        // does black holing too
    else
        threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
        fprintf(stderr, "%p (%s), ",
                p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}

void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
        fprintf(stderr, "%p (%s), ",
                p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
            info->type == MUT_ARR_PTRS_FROZEN ||
            info->type == TSO ||
            info->type == ARR_WORDS);
}