/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.107 2001/07/24 16:36:43 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"           // for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"           // ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
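
/* For example, evacuate() (below) conses a reachable static closure
 * onto the list roughly like this -- the static link field doubles as
 * the "already on the list" flag (see the CONSTR_STATIC case):
 *
 *     if (major_gc && STATIC_LINK(info,q) == NULL) {
 *         STATIC_LINK(info,q) = static_objects;
 *         static_objects = q;
 *     }
 */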
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
StgWeak *old_weak_ptr_list;   // also pending finaliser list
static rtsBool weak_done;     // all done for this pass
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
lnat new_blocks;          // blocks allocated during this GC
lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
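
/* A hypothetical illustration: a long chain of selector thunks
 *
 *     t1 = fst t2;  t2 = fst t3;  ...  tN = fst x;
 *
 * makes the THUNK_SELECTOR case in evacuate() (below) recurse once
 * per link, so the depth is capped rather than risking C-stack
 * overflow.
 */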
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgClosure *p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */
#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
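
/* A minimal sketch of the matching pop operation, assuming the stack
 * grows upwards from mark_stack (scavenge_mark_stack() below relies
 * on it):
 */
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}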
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;    // it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
        nat bitmap_size; // in bytes
        bdescr *bitmap_bdescr;
        StgPtr bitmap;

        bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
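        // (one mark bit per word: e.g. with 4-byte words, a 4Kbyte
        // block needs 4096/4 = 1024 bits = 128 bytes of bitmap.  The
        // numbers are illustrative; BLOCK_SIZE is configuration-dependent.)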
        if (bitmap_size > 0) {
          bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
                                     / BLOCK_SIZE);
          stp->bitmap = bitmap_bdescr;
          bitmap = bitmap_bdescr->start;

          IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
                               bitmap_size, bitmap););

          // don't forget to fill it with zeros!
          memset(bitmap, 0, bitmap_size);

          // for each block in this step, point to its bitmap from the
          // block descriptor.
          for (bd=stp->blocks; bd != NULL; bd = bd->link) {
            bd->u.bitmap = bitmap;
            bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
          }
        }
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
        ASSERT(stp->blocks == NULL);
        bd = allocBlock();
        bd->gen_no = g;
        bd->step = stp;
        bd->link = NULL;
        bd->flags = 0;  // *not* a to-space block or a large object
        stp->hp = bd->start;
        stp->hpLim = stp->hp + BLOCK_SIZE_W;
        stp->hp_bd = bd;
        stp->blocks = bd;
        stp->n_blocks = 1;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
    mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
    mark_stack = (StgPtr *)mark_stack_bdescr->start;
    mark_sp = mark_stack;
    mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
    mark_stack_bdescr = NULL;
  }
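
  /* Note: the mark stack capacity is MARK_STACK_BLOCKS * BLOCK_SIZE_W
   * entries; overflow is not handled yet (evacuate() below barfs if
   * mark_stack_full()).
   */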
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;
    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
      scavenge_mark_stack();
      flag = rtsTrue;
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
          }
        }
      }
    }

    if (flag) { goto loop; }
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanUpWeakPtrList below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        stp->hp_bd->free = stp->hp;
        stp->hp_bd->link = NULL;
      }
    }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
    // save number of blocks for stats
    oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
    compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp_bd->free;
        }
      }

      // for generations we collected...
      if (g <= N) {

        // rough calculation of garbage collected, for stats output
        if (stp->is_compacted) {
          collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
        } else {
          collected += stp->n_blocks * BLOCK_SIZE_W;
        }

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          if (stp->is_compacted) {
            // for a compacted step, just shift the new to-space
            // onto the front of the now-compacted existing blocks.
            for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;  // now from-space
            }
            // tack the new blocks on the end of the existing blocks
            if (stp->blocks == NULL) {
              stp->blocks = stp->to_blocks;
            } else {
              for (bd = stp->blocks; bd != NULL; bd = next) {
                next = bd->link;
                if (next == NULL) {
                  bd->link = stp->to_blocks;
                }
              }
            }
            // add the new blocks to the block tally
            stp->n_blocks += stp->n_to_blocks;
          } else {
            freeChain(stp->blocks);
            stp->blocks = stp->to_blocks;
            stp->n_blocks = stp->n_to_blocks;
            for (bd = stp->blocks; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;  // now from-space
            }
          }
          stp->to_blocks = NULL;
          stp->n_to_blocks = 0;
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on the
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }

        // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->flags &= ~BF_EVACUATED;
        }
        stp->large_objects = stp->scavenged_large_objects;
        stp->n_large_blocks = stp->n_scavenged_large_blocks;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------
         *                      oldest_gen_no
         */
        if (RtsFlags.GcFlags.generations > 1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
            / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }
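
        /* e.g. with 5 generations and oldest_gen->max_blocks = 1000,
         * generations 1..4 get max_blocks = 250, 500, 750 and 1000
         * respectively (figures purely illustrative).
         */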
      // for older generations...
      } else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = stp->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->flags &= ~BF_EVACUATED;
          dbl_link_onto(bd, &stp->large_objects);
        }

        // add the new blocks we promoted during this GC
        stp->n_blocks += stp->n_to_blocks;
        stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
              RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
           (int)oldest_gen->steps[0].n_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
        heapOverflow();
      }
    }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
    freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->is_compacted && stp->bitmap != NULL) {
        freeGroup(stp->bitmap);
      }
    }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;  // now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
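      /* (from-space plus the live copy account for 2 * blocks; whatever
       * is left of the heap budget after those can be the new nursery.)
       */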
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded();  // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
        (100 + (long)g0s0_pcnt_kept);
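
      /* e.g. a suggestion of 1000 blocks with needed = 200 and
       * g0s0_pcnt_kept = 30 gives (1000-200)*100/130 = 615 blocks
       * (illustrative figures only).
       */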
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
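
/* The caller drives this to a fixpoint, roughly (cf. the scavenge
 * loop in GarbageCollect above):
 *
 *     while (traverse_weak_ptr_list()) {
 *         // scavenge everything reachable again
 *     }
 */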
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
        t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
        next = t->link;
        *prev = next;
        continue;
      case ThreadKilled:
      case ThreadComplete:
        // finished or died.  The thread might still be alive, but we
        // don't keep it on the all_threads list.  Don't forget to
        // stub out its global_link field.
        next = t->global_link;
        t->global_link = END_TSO_QUEUE;
        *prev = next;
        continue;
      default:
        ;
      }

      if (tmp == NULL) {
        // not alive (yet): leave this thread on the old_all_threads list.
        prev = &(t->global_link);
        next = t->global_link;
      } else {
        // alive: move this thread onto the all_threads list.
        next = t->global_link;
        t->global_link = all_threads;
        all_threads = t;
        *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

 loop:
  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  bd = Bdescr((P_)p);

  // ignore closures in generations that we're not collecting.
  if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
    return p;
  }
  // large objects have an evacuated flag
  if (bd->flags & BF_LARGE) {
    if (bd->flags & BF_EVACUATED) {
      return p;      // alive!
    } else {
      return NULL;   // dead.
    }
  }
  // check the mark bit for compacted steps
  if (bd->step->is_compacted && is_marked((P_)p,bd)) {
    return p;
  }

  info = get_itbl(p);
  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:            // rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }

  default:
    // dead.
    return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;
  bd->link = NULL;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static inline void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
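
/* scavenge() and scavenge_mark_stack() below use this when
 * failed_to_evac is set: the MUT_CONS keeps the object on the older
 * generation's mutable list, so the old-to-new pointer is found
 * again at the next GC.
 */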
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

     if  M >= evac_gen
         if  M > N     do nothing
         else          evac to step->to

     if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

     if  M >= evac_gen  do nothing
     if  M < evac_gen   set failed_to_evac flag to indicate that we
                        didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
          ((StgTSO *)q)->what_next == ThreadRelocated) {
        q = (StgClosure *)((StgTSO *)q)->link;
        goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
      if (!is_marked((P_)q,bd)) {
        mark((P_)q,bd);
        if (mark_stack_full()) {
          barf("ToDo: mark stack full");
        }
        push_mark_stack((P_)q);
      }
      return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);
  switch (info->type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
  {
    StgWord w = (StgWord)q->payload[0];
    if (q->header.info == Czh_con_info &&
        // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
        (StgChar)w <= MAX_CHARLIKE) {
      return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
    }
    if (q->header.info == Izh_con_info &&
        (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
      return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
    }
    // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:             // here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord offset = info->layout.selector_offset;

          // check that the size is in range
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          // perform the selection!
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->flags & BF_EVACUATED) {
              if (bd->gen_no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;
        goto selector_loop;

      case EVACUATED:
        selectee = ((StgEvacuated *)selectee)->evacuee;
        goto selector_loop;

      case THUNK_SELECTOR:
#if 0
        /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
           something) to go into an infinite loop when the nightly
           stage2 compiles PrelTup.lhs. */

        /* we can't recurse indefinitely in evacuate(), so set a
         * limit on the number of times we can go around this
         * loop.
         */
        if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
          bd = Bdescr((P_)selectee);
          if (!(bd->flags & BF_EVACUATED)) {
            thunk_selector_depth++;
            selectee = evacuate(selectee);
            thunk_selector_depth--;
            goto selector_loop;
          }
        }
        // otherwise, fall through...
#endif

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        // not evaluated yet
        break;

#if defined(PAR)
        // a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
        {
          //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
          to = copy(q,BLACKHOLE_sizeW(),stp);
          //ToDo: derive size etc from reverted IP
          //to = copy(q,size,stp);
          // recordMutable((StgMutClosure *)to);
          return to;
        }

      case BLOCKED_FETCH:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
        to = copy(q,sizeofW(StgBlockedFetch),stp);
        return to;

      case FETCH_ME:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMe),stp);
        return to;

      case FETCH_ME_BQ:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
        return to;
#endif

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
        && ((StgIndStatic *)q)->saved_info == NULL
        && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {   // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
        move_TSO(tso, new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif
  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
  ptrdiff_t diff;

  // relocate the stack pointers...
  diff = (StgPtr)dest - (StgPtr)src; // In *words*
  dest->sp = (StgPtr)dest->sp + diff;
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

  relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)mvar);
        failed_to_evac = rtsFalse; // mutable.
        p += sizeofW(StgMVar);
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (stp->gen_no != 0) {
        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;
    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        recordMutable((StgMutClosure *)bh);
        failed_to_evac = rtsFalse;
        p += BLACKHOLE_sizeW();
        break;
      }
    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }
    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      // follow everything
      {
        StgPtr next;

        evac_gen = 0;  // repeatedly mutable
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)q);
        failed_to_evac = rtsFalse; // mutable anyhow.
        break;
      }
    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        // it's tempting to recordMutable() if failed_to_evac is
        // false, but that breaks some assumptions (eg. every
        // closure on the mutable list is supposed to have the MUT
        // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)tso);
        failed_to_evac = rtsFalse; // mutable anyhow.
        p += tso_sizeW(tso);
        break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        nat size, ptrs, nonptrs, vhs;
        char str[80];
        StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        recordMutable((StgMutClosure *)rbh);
        failed_to_evac = rtsFalse; // mutable anyhow.
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        // follow the pointer to the node which is being demanded
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        // follow the link to the rest of the blocking queue
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif
    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
           info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
  StgPtr p;
  const StgInfoTable *info;
  nat saved_evac_gen;

  evac_gen = oldest_gen->no;
  saved_evac_gen = evac_gen;

  while (!mark_stack_empty()) {
    p = pop_mark_stack();

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
    switch (info->type) {

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsFalse; // mutable.
        break;
      }

    case FUN_2_0:
    case THUNK_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      mark(p+1,Bdescr(p));
      break;

    case FUN_1_0:
    case FUN_1_1:
    case THUNK_1_0:
    case THUNK_1_1:
      scavenge_srt(info);
    case CONSTR_1_0:
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      mark(p+1,Bdescr(p));
      break;

    case FUN_0_1:
    case FUN_0_2:
    case THUNK_0_1:
    case THUNK_0_2:
      scavenge_srt(info);
    case CONSTR_0_1:
    case CONSTR_0_2:
      mark(p+1,Bdescr(p));
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        break;
      }

    case IND_PERM:
      // don't need to do anything here: the only possible case
      // is that we're in a 1-space compacting collector, with
      // no "old" generation.
      break;

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      failed_to_evac = rtsFalse;
      mark(p+1,Bdescr(p));
      break;
    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      failed_to_evac = rtsFalse;
      mark(p+1,Bdescr(p));
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse;
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
    case ARR_WORDS:
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        failed_to_evac = rtsFalse;
        break;
      }
    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        break;
      }
    case MUT_ARR_PTRS:
      // follow everything
      {
        StgPtr next;

        evac_gen = 0;  // repeatedly mutable
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsFalse; // mutable anyhow.
        break;
      }
    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsFalse;
        break;
      }
2519 case RBH: // cf. BLACKHOLE_BQ
2522 nat size, ptrs, nonptrs, vhs;
2524 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2526 StgRBH *rbh = (StgRBH *)p;
2527 (StgClosure *)rbh->blocking_queue =
2528 evacuate((StgClosure *)rbh->blocking_queue);
2529 recordMutable((StgMutClosure *)rbh);
2530 failed_to_evac = rtsFalse; // mutable anyhow.
2532 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2533 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2539 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2540 // follow the pointer to the node which is being demanded
2541 (StgClosure *)bf->node =
2542 evacuate((StgClosure *)bf->node);
2543 // follow the link to the rest of the blocking queue
2544 (StgClosure *)bf->link =
2545 evacuate((StgClosure *)bf->link);
2546 if (failed_to_evac) {
2547 failed_to_evac = rtsFalse;
2548 recordMutable((StgMutClosure *)bf);
2551 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2552 bf, info_type((StgClosure *)bf),
2553 bf->node, info_type(bf->node)));
2561 break; // nothing to do in this case
2563 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2565 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2566 (StgClosure *)fmbq->blocking_queue =
2567 evacuate((StgClosure *)fmbq->blocking_queue);
2568 if (failed_to_evac) {
2569 failed_to_evac = rtsFalse;
2570 recordMutable((StgMutClosure *)fmbq);
2573 belch("@@ scavenge: %p (%s) exciting, isn't it",
2574 p, info_type((StgClosure *)p)));
2580 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2584 if (failed_to_evac) {
2585 failed_to_evac = rtsFalse;
2586 mkMutCons((StgClosure *)p, &generations[evac_gen]);
2589 } // while (!mark_stack_empty())
2592 /* -----------------------------------------------------------------------------
2593 Scavenge one object.
2595 This is used for objects that are temporarily marked as mutable
2596 because they contain old-to-new generation pointers. Only certain
2597 objects can have this property.
2598 -------------------------------------------------------------------------- */
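/* Calling-convention sketch (illustrative): callers must test the
 * return value rather than the failed_to_evac global, because
 * scavenge_one() clears the global before returning.
 * keep_on_mutable_list() below is a hypothetical stand-in for the
 * caller's real book-keeping.
 */
#if 0
    if (scavenge_one(q) == rtsTrue) {   // rtsTrue: something wasn't promoted
        keep_on_mutable_list(p);        // hypothetical helper
    }
#endif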
2601 scavenge_one(StgClosure *p)
2603 const StgInfoTable *info;
2606 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2607 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
  switch (info->type) {
2614 case FUN_1_0: // hardly worth specialising these guys
2634 case IND_OLDGEN_PERM:
2638 end = (P_)p->payload + info->layout.payload.ptrs;
2639 for (q = (P_)p->payload; q < end; q++) {
2640 (StgClosure *)*q = evacuate((StgClosure *)*q);
2646 case SE_CAF_BLACKHOLE:
2651 case THUNK_SELECTOR:
2653 StgSelector *s = (StgSelector *)p;
2654 s->selectee = evacuate(s->selectee);
2658 case AP_UPD: /* same as PAPs */
2660 /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
2664 StgPAP* pap = (StgPAP *)p;
2666 pap->fun = evacuate(pap->fun);
2667 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
    /* This might happen if, for instance, a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will be on
     * the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;
2679 case MUT_ARR_PTRS_FROZEN:
2681 // follow everything
2685 next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2686 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
2687 (StgClosure *)*q = evacuate((StgClosure *)*q);
2693 barf("scavenge_one: strange object %d", (int)(info->type));
2696 no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
2701 /* -----------------------------------------------------------------------------
2702 Scavenging mutable lists.
2704 We treat the mutable list of each generation > N (i.e. all the
2705 generations older than the one being collected) as roots. We also
2706 remove non-mutable objects from the mutable list at this point.
2707 -------------------------------------------------------------------------- */
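/* For orientation, a sketch (not compiled) of how objects arrive on
 * these lists in the first place: whenever the mutator stores a new
 * pointer into an object living in an older generation, it records
 * that object with recordMutable(), as used elsewhere in this file.
 * The helper below is illustrative only.
 */
#if 0
static void
example_write_barrier(StgMutVar *mv, StgClosure *new_value)
{
    mv->var = new_value;
    // mv may now hold an old-to-new pointer, so it must be treated
    // as a root by the next minor collection:
    recordMutable((StgMutClosure *)mv);
}
#endif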
2710 scavenge_mut_once_list(generation *gen)
2712 const StgInfoTable *info;
2713 StgMutClosure *p, *next, *new_list;
2715 p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
2720 failed_to_evac = rtsFalse;
2722 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2724 // make sure the info pointer is into text space
2725 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2726 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2730 if (info->type==RBH)
2731 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2733 switch(info->type) {
2736 case IND_OLDGEN_PERM:
2738 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
2741 ((StgIndOldGen *)p)->indirectee =
2742 evacuate(((StgIndOldGen *)p)->indirectee);
#if 0 && defined(DEBUG)
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr, "evac IND_OLDGEN: %ld bytes\n", size * sizeof(W_));
      }
#endif
2770 /* failed_to_evac might happen if we've got more than two
2771 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
2775 if (failed_to_evac) {
2776 failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      }
2780 /* the mut_link field of an IND_STATIC is overloaded as the
2781 * static link field too (it just so happens that we don't need
2782 * both at the same time), so we need to NULL it out when
2783 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      /* A MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible, by promoting whatever it
       * points to.
       */
      if (scavenge_one((StgClosure *)((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;
2808 // shouldn't have anything else on the mutables list
2809 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2813 gen->mut_once_list = new_list;
2818 scavenge_mutable_list(generation *gen)
2820 const StgInfoTable *info;
2821 StgMutClosure *p, *next;
  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
2827 failed_to_evac = rtsFalse;
2829 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2831 // make sure the info pointer is into text space
2832 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2833 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2837 if (info->type==RBH)
2838 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2840 switch(info->type) {
2843 // follow everything
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;
2849 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2850 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2851 (StgClosure *)*q = evacuate((StgClosure *)*q);
2856 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2857 case MUT_ARR_PTRS_FROZEN:
2862 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2863 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2864 (StgClosure *)*q = evacuate((StgClosure *)*q);
2868 if (failed_to_evac) {
2869 failed_to_evac = rtsFalse;
2870 mkMutCons((StgClosure *)p, gen);
2876 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      break;
2883 StgMVar *mvar = (StgMVar *)p;
2884 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2885 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2886 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
2894 StgTSO *tso = (StgTSO *)p;
2898 /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
2902 tso->mut_link = gen->mut_list;
2903 gen->mut_list = (StgMutClosure *)tso;
2909 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2910 (StgClosure *)bh->blocking_queue =
2911 evacuate((StgClosure *)bh->blocking_queue);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
2920 case IND_OLDGEN_PERM:
2921 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
2925 ((StgIndOldGen *)p)->indirectee =
2926 evacuate(((StgIndOldGen *)p)->indirectee);
2929 if (failed_to_evac) {
2930 failed_to_evac = rtsFalse;
2931 p->mut_link = gen->mut_once_list;
2932 gen->mut_once_list = p;
2939 // HWL: check whether all of these are necessary
2941 case RBH: // cf. BLACKHOLE_BQ
2943 // nat size, ptrs, nonptrs, vhs;
2945 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2946 StgRBH *rbh = (StgRBH *)p;
2947 (StgClosure *)rbh->blocking_queue =
2948 evacuate((StgClosure *)rbh->blocking_queue);
2949 if (failed_to_evac) {
2950 failed_to_evac = rtsFalse;
2951 recordMutable((StgMutClosure *)rbh);
2953 // ToDo: use size of reverted closure here!
2954 p += BLACKHOLE_sizeW();
2960 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2961 // follow the pointer to the node which is being demanded
2962 (StgClosure *)bf->node =
2963 evacuate((StgClosure *)bf->node);
2964 // follow the link to the rest of the blocking queue
2965 (StgClosure *)bf->link =
2966 evacuate((StgClosure *)bf->link);
2967 if (failed_to_evac) {
2968 failed_to_evac = rtsFalse;
2969 recordMutable((StgMutClosure *)bf);
2971 p += sizeofW(StgBlockedFetch);
2977 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
2980 p += sizeofW(StgFetchMe);
2981 break; // nothing to do in this case
2983 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2985 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2986 (StgClosure *)fmbq->blocking_queue =
2987 evacuate((StgClosure *)fmbq->blocking_queue);
2988 if (failed_to_evac) {
2989 failed_to_evac = rtsFalse;
2990 recordMutable((StgMutClosure *)fmbq);
2992 p += sizeofW(StgFetchMeBlockingQueue);
2998 // shouldn't have anything else on the mutables list
2999 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3006 scavenge_static(void)
3008 StgClosure* p = static_objects;
3009 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects */
3013 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
     list... */
3017 while (p != END_OF_STATIC_LIST) {
3021 if (info->type==RBH)
3022 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3024 // make sure the info pointer is into text space
3025 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3026 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3028 /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
3031 static_objects = STATIC_LINK(info,p);
3032 STATIC_LINK(info,p) = scavenged_static_objects;
3033 scavenged_static_objects = p;
    switch (info->type) {
3039 StgInd *ind = (StgInd *)p;
3040 ind->indirectee = evacuate(ind->indirectee);
3042 /* might fail to evacuate it, in which case we have to pop it
3043 * back on the mutable list (and take it off the
3044 * scavenged_static list because the static link and mut link
       * pointers are one and the same).
       */
3047 if (failed_to_evac) {
3048 failed_to_evac = rtsFalse;
3049 scavenged_static_objects = STATIC_LINK(info,p);
3050 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3051 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3065 next = (P_)p->payload + info->layout.payload.ptrs;
3066 // evacuate the pointers
3067 for (q = (P_)p->payload; q < next; q++) {
3068 (StgClosure *)*q = evacuate((StgClosure *)*q);
3074 barf("scavenge_static: strange closure %d", (int)(info->type));
3077 ASSERT(failed_to_evac == rtsFalse);
3079 /* get the next static object from the list. Remember, there might
3080 * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
3087 /* -----------------------------------------------------------------------------
3088 scavenge_stack walks over a section of stack and evacuates all the
3089 objects pointed to by it. We can use the same code for walking
3090 PAPs, since these are just sections of copied stack.
3091 -------------------------------------------------------------------------- */
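/* A note on the bitmap convention used in the activation-record cases
 * below (a worked example, not from the original): bit i of a liveness
 * bitmap describes payload word i, and a *clear* bit marks a pointer
 * to be evacuated, while a *set* bit marks a non-pointer word to skip.
 * So a mask of binary ..0110 says words 1 and 2 are non-pointers; once
 * the mask runs out of set bits, any remaining words are picked up by
 * the generic stack walk at the top of the loop.
 */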
3094 scavenge_stack(StgPtr p, StgPtr stack_end)
3097 const StgInfoTable* info;
3100 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
3108 while (p < stack_end) {
3111 // If we've got a tag, skip over that many words on the stack
3112 if (IS_ARG_TAG((W_)q)) {
    /* Is q a pointer to a closure?
     */
3119 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3121 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3122 ASSERT(closure_STATIC((StgClosure *)q));
3124 // otherwise, must be a pointer into the allocation space.
3127 (StgClosure *)*p = evacuate((StgClosure *)q);
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
3137 info = get_itbl((StgClosure *)p);
3139 switch (info->type) {
3141 // Dynamic bitmap: the mask is stored on the stack
3143 bitmap = ((StgRetDyn *)p)->liveness;
3144 p = (P_)&((StgRetDyn *)p)->payload[0];
3147 // probably a slow-entry point return address:
3155 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3156 old_p, p, old_p+1));
3158 p++; // what if FHS!=1 !? -- HWL
3163 /* Specialised code for update frames, since they're so common.
3164 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
     */
3169 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3171 p += sizeofW(StgUpdateFrame);
3174 frame->updatee = evacuate(frame->updatee);
3176 #else // specialised code for update frames, not sure if it's worth it.
3178 nat type = get_itbl(frame->updatee)->type;
3180 if (type == EVACUATED) {
3181 frame->updatee = evacuate(frame->updatee);
3184 bdescr *bd = Bdescr((P_)frame->updatee);
3186 if (bd->gen_no > N) {
3187 if (bd->gen_no < evac_gen) {
3188 failed_to_evac = rtsTrue;
3193 // Don't promote blackholes
3195 if (!(stp->gen_no == 0 &&
3197 stp->no == stp->gen->n_steps-1)) {
3204 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3205 sizeofW(StgHeader), stp);
3206 frame->updatee = to;
3209 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3210 frame->updatee = to;
3211 recordMutable((StgMutClosure *)to);
3214 /* will never be SE_{,CAF_}BLACKHOLE, since we
3215 don't push an update frame for single-entry thunks. KSW 1999-01. */
3216 barf("scavenge_stack: UPDATE_FRAME updatee");
      // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3229 bitmap = info->layout.bitmap;
3231 // this assumes that the payload starts immediately after the info-ptr
3233 while (bitmap != 0) {
3234 if ((bitmap & 1) == 0) {
3235 (StgClosure *)*p = evacuate((StgClosure *)*p);
3238 bitmap = bitmap >> 1;
3245 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3250 StgLargeBitmap *large_bitmap;
3253 large_bitmap = info->layout.large_bitmap;
3256 for (i=0; i<large_bitmap->size; i++) {
3257 bitmap = large_bitmap->bitmap[i];
3258 q = p + BITS_IN(W_);
3259 while (bitmap != 0) {
3260 if ((bitmap & 1) == 0) {
3261 (StgClosure *)*p = evacuate((StgClosure *)*p);
3264 bitmap = bitmap >> 1;
3266 if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
3274 // and don't forget to follow the SRT
3279 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3284 /*-----------------------------------------------------------------------------
3285 scavenge the large object list.
3287 evac_gen set by caller; similar games played with evac_gen as with
3288 scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
3291 --------------------------------------------------------------------------- */
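/* The "games" in question, as a sketch (using the same globals as the
 * cases below, with a single pointer field standing in for the general
 * case): for an object that lives permanently on a mutable list, we
 * drop evac_gen to zero around the child evacuations and then
 * deliberately ignore any failure to promote.
 */
#if 0
    evac_gen = 0;                       // children may legitimately stay young
    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
    evac_gen = saved_evac_gen;          // restore for the next object
    failed_to_evac = rtsFalse;          // it's a permanent root anyway
#endif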
3294 scavenge_large(step *stp)
3298 const StgInfoTable* info;
3299 nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen
3301 bd = stp->new_large_objects;
3303 for (; bd != NULL; bd = stp->new_large_objects) {
3305 /* take this object *off* the large objects list and put it on
3306 * the scavenged large objects list. This is so that we can
3307 * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
3310 stp->new_large_objects = bd->link;
3311 dbl_link_onto(bd, &stp->scavenged_large_objects);
3313 // update the block count in this step.
3314 stp->n_scavenged_large_blocks += bd->blocks;
3317 info = get_itbl((StgClosure *)p);
3319 // only certain objects can be "large"...
3321 switch (info->type) {
3324 // nothing to follow
3329 // follow everything
3332 evac_gen = 0; // repeatedly mutable
3333 recordMutable((StgMutClosure *)p);
3334 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3335 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3336 (StgClosure *)*p = evacuate((StgClosure *)*p);
3338 evac_gen = saved_evac_gen;
3339 failed_to_evac = rtsFalse;
3343 case MUT_ARR_PTRS_FROZEN:
3345 // follow everything
3348 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3349 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3350 (StgClosure *)*p = evacuate((StgClosure *)*p);
3357 StgTSO *tso = (StgTSO *)p;
3359 evac_gen = 0; // repeatedly mutable
3361 recordMutable((StgMutClosure *)tso);
3362 evac_gen = saved_evac_gen;
3363 failed_to_evac = rtsFalse;
3370 StgPAP* pap = (StgPAP *)p;
3371 pap->fun = evacuate(pap->fun);
3372 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
3377 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
3380 if (failed_to_evac) {
3381 failed_to_evac = rtsFalse;
3382 mkMutCons((StgClosure *)q, &generations[evac_gen]);
3387 /* -----------------------------------------------------------------------------
3388 Initialising the static object & mutable lists
3389 -------------------------------------------------------------------------- */
3392 zero_static_object_list(StgClosure* first_static)
3396 const StgInfoTable *info;
3398 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
3401 STATIC_LINK(info,p) = NULL;
3405 /* This function is only needed because we share the mutable link
3406 * field with the static link field in an IND_STATIC, so we have to
3407 * zero the mut_link field before doing a major GC, which needs the
3408 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
3415 zero_mutable_list( StgMutClosure *first )
3417 StgMutClosure *next, *c;
  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
3434 for (c = (StgIndStatic *)caf_list; c != NULL;
3435 c = (StgIndStatic *)c->static_link)
3437 c->header.info = c->saved_info;
3438 c->saved_info = NULL;
3439 // could, but not necessary: c->static_link = NULL;
3445 scavengeCAFs( void )
3450 for (c = (StgIndStatic *)caf_list; c != NULL;
3451 c = (StgIndStatic *)c->static_link)
3453 c->indirectee = evacuate(c->indirectee);
3457 /* -----------------------------------------------------------------------------
3458 Sanity code for CAF garbage collection.
3460 With DEBUG turned on, we manage a CAF list in addition to the SRT
3461 mechanism. After GC, we run down the CAF list and blackhole any
3462 CAFs which have been garbage collected. This means we get an error
3463 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
3467 -------------------------------------------------------------------------- */
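/* A sketch of the check described above (illustrative; the real code
 * is in the DEBUG-only block below): a CAF whose static link is still
 * NULL after GC was never reached by the collector, so we black-hole
 * it to trap any subsequent entry.
 */
#if 0
    if (STATIC_LINK(info,p) == NULL) {          // never scavenged => dead
        SET_INFO(p, &stg_BLACKHOLE_info);       // trap re-entry cleanly
    }
#endif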
3469 #if 0 && defined(DEBUG)
3476 const StgInfoTable *info;
3487 ASSERT(info->type == IND_STATIC);
3489 if (STATIC_LINK(info,p) == NULL) {
3490 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04lx\n", (long)p));
3492 SET_INFO(p,&stg_BLACKHOLE_info);
3493 p = STATIC_LINK2(info,p);
3497 pp = &STATIC_LINK2(info,p);
3504 // fprintf(stderr, "%d CAFs live\n", i);
/* -----------------------------------------------------------------------------
   Lazy black holing

   Whenever a thread returns to the scheduler after possibly doing
3513 some work, we have to run down the stack and black-hole all the
3514 closures referred to by update frames.
3515 -------------------------------------------------------------------------- */
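/* A minimal sketch of the black-holing step itself, using the same
 * macros as the code below: overwriting the updatee's info pointer
 * both releases its free variables for collection and traps any
 * repeated entry.
 */
#if 0
    SET_INFO(frame->updatee, &stg_BLACKHOLE_info);
#endif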
3518 threadLazyBlackHole(StgTSO *tso)
3520 StgUpdateFrame *update_frame;
3521 StgBlockingQueue *bh;
3524 stack_end = &tso->stack[tso->stack_size];
3525 update_frame = tso->su;
3528 switch (get_itbl(update_frame)->type) {
3531 update_frame = ((StgCatchFrame *)update_frame)->link;
3535 bh = (StgBlockingQueue *)update_frame->updatee;
3537 /* if the thunk is already blackholed, it means we've also
3538 * already blackholed the rest of the thunks on this stack,
3539 * so we can stop early.
3541 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
3544 if (bh->header.info == &stg_BLACKHOLE_info) {
3548 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3549 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3550 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
      fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
3553 SET_INFO(bh,&stg_BLACKHOLE_info);
3556 update_frame = update_frame->link;
3560 update_frame = ((StgSeqFrame *)update_frame)->link;
      barf("threadLazyBlackHole: weird frame type");
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
3576 * lazy black holing here.
3578 * -------------------------------------------------------------------------- */
3581 threadSqueezeStack(StgTSO *tso)
3583 lnat displacement = 0;
3584 StgUpdateFrame *frame;
3585 StgUpdateFrame *next_frame; // Temporally next
3586 StgUpdateFrame *prev_frame; // Temporally previous
3588 rtsBool prev_was_update_frame;
3590 StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
3593 void printObj( StgClosure *obj ); // from Printer.c
3595 top_frame = tso->su;
3598 bottom = &(tso->stack[tso->stack_size]);
3601 /* There must be at least one frame, namely the STOP_FRAME.
3603 ASSERT((P_)frame < bottom);
3605 /* Walk down the stack, reversing the links between frames so that
3606 * we can walk back up as we squeeze from the bottom. Note that
3607 * next_frame and prev_frame refer to next and previous as they were
3608 * added to the stack, rather than the way we see them in this
3609 * walk. (It makes the next loop less confusing.)
3611 * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
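  /* For orientation (a sketch with generic names, not from the
   * original): the loop below is ordinary in-place singly-linked-list
   * reversal, applied to the chain of update frames:
   *
   *    prev = frame->link;     // frame pushed before this one
   *    frame->link = next;     // flip the link to the younger frame
   *    next = frame;
   *    frame = prev;
   */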
3616 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3617 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3618 prev_frame = frame->link;
3619 frame->link = next_frame;
3624 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3625 printObj((StgClosure *)prev_frame);
3626 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3629 switch (get_itbl(frame)->type) {
3632 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3645 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3647 printObj((StgClosure *)prev_frame);
3650 if (get_itbl(frame)->type == UPDATE_FRAME
3651 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3656 /* Now, we're at the bottom. Frame points to the lowest update
3657 * frame on the stack, and its link actually points to the frame
3658 * above. We have to walk back up the stack, squeezing out empty
3659 * update frames and turning the pointers back around on the way
3662 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3663 * we never want to eliminate it anyway. Just walk one step up
3664 * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */
3672 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
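  /* An example of when the squeeze fires (illustrative): forcing a
   * chain of thunks x1 = x2, x2 = x3, ... pushes one update frame per
   * thunk with nothing in between, so every updatee except the one we
   * keep can be overwritten with an indirection to a single result and
   * its frame discarded.
   */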
3680 while (frame != NULL) {
3682 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3683 rtsBool is_update_frame;
3685 next_frame = frame->link;
3686 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
      /* Check to see if
       *   1. both the previous and current frame are update frames
       *   2. the current frame is empty
       */
3692 if (prev_was_update_frame && is_update_frame &&
3693 (P_)prev_frame == frame_bottom + displacement) {
3695 // Now squeeze out the current frame
3696 StgClosure *updatee_keep = prev_frame->updatee;
3697 StgClosure *updatee_bypass = frame->updatee;
3700 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3704 /* Deal with blocking queues. If both updatees have blocked
3705 * threads, then we should merge the queues into the update
3706 * frame that we're keeping.
3708 * Alternatively, we could just wake them up: they'll just go
3709 * straight to sleep on the proper blackhole! This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
3713 #if 0 // do it properly...
3714 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3715 # error Unimplemented lazy BH warning. (KSW 1999-01)
3717 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3718 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3720 // Sigh. It has one. Don't lose those threads!
3721 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3722 // Urgh. Two queues. Merge them.
3723 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3725 while (keep_tso->link != END_TSO_QUEUE) {
3726 keep_tso = keep_tso->link;
3728 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3731 // For simplicity, just swap the BQ for the BH
3732 P_ temp = updatee_keep;
3734 updatee_keep = updatee_bypass;
3735 updatee_bypass = temp;
3737 // Record the swap in the kept frame (below)
3738 prev_frame->updatee = updatee_keep;
3743 TICK_UPD_SQUEEZED();
3744 /* wasn't there something about update squeezing and ticky to be
3745 * sorted out? oh yes: we aren't counting each enter properly
3746 * in this case. See the log somewhere. KSW 1999-04-21
3748 * Check two things: that the two update frames don't point to
3749 * the same object, and that the updatee_bypass isn't already an
3750 * indirection. Both of these cases only happen when we're in a
       * black hole-style loop (and there are multiple update frames
       * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
3755 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3756 // this wakes the threads up
3757 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3760 sp = (P_)frame - 1; // sp = stuff to slide
3761 displacement += sizeofW(StgUpdateFrame);
3764 // No squeeze for this frame
3765 sp = frame_bottom - 1; // Keep the current frame
      /* Do lazy black-holing.
       */
3769 if (is_update_frame) {
3770 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3771 if (bh->header.info != &stg_BLACKHOLE_info &&
3772 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3773 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3774 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at %p\n",(void *)bh);
3778 /* zero out the slop so that the sanity checker can tell
           * where the next closure is.
           */
3782 StgInfoTable *info = get_itbl(bh);
3783 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
          /* don't zero out slop for a THUNK_SELECTOR, because its layout
           * info is used for a different purpose, and it's exactly the
           * same size as a BLACKHOLE in any case.
           */
3788 if (info->type != THUNK_SELECTOR) {
3789 for (i = np; i < np + nw; i++) {
3790 ((StgClosure *)bh)->payload[i] = 0;
3795 SET_INFO(bh,&stg_BLACKHOLE_info);
3799 // Fix the link in the current frame (should point to the frame below)
3800 frame->link = prev_frame;
3801 prev_was_update_frame = is_update_frame;
3804 // Now slide all words from sp up to the next frame
3806 if (displacement > 0) {
3807 P_ next_frame_bottom;
3809 if (next_frame != NULL)
3810 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3812 next_frame_bottom = tso->sp - 1;
3816 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
3820 while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp--;
      }
3825 (P_)prev_frame = (P_)frame + displacement;
3829 tso->sp += displacement;
3830 tso->su = prev_frame;
3833 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
3834 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    // does black holing too
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
3861 printMutOnceList(generation *gen)
3863 StgMutClosure *p, *next;
3865 p = gen->mut_once_list;
3868 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3869 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3870 fprintf(stderr, "%p (%s), ",
3871 p, info_type((StgClosure *)p));
3873 fputc('\n', stderr);
3877 printMutableList(generation *gen)
3879 StgMutClosure *p, *next;
3884 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3885 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3886 fprintf(stderr, "%p (%s), ",
3887 p, info_type((StgClosure *)p));
3889 fputc('\n', stderr);
3892 static inline rtsBool
3893 maybeLarge(StgClosure *closure)
3895 StgInfoTable *info = get_itbl(closure);
3897 /* closure types that may be found on the new_large_objects list;
3898 see scavenge_large */
3899 return (info->type == MUT_ARR_PTRS ||
3900 info->type == MUT_ARR_PTRS_FROZEN ||
3901 info->type == TSO ||
3902 info->type == ARR_WORDS);