/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.116 2001/08/08 10:50:37 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"

#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
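
#if 0
/* Illustrative sketch (not part of the collector): consing a static
 * closure onto the intrusive list.  STATIC_LINK_FIELD is a hypothetical
 * accessor standing in for the per-closure-type THUNK_STATIC_LINK /
 * FUN_STATIC_LINK / STATIC_LINK macros used in evacuate() below.
 */
static void
example_push_static (StgClosure *c)
{
    if (STATIC_LINK_FIELD(c) == NULL) {	 // NULL link <=> not yet on a list
	STATIC_LINK_FIELD(c) = static_objects;
	static_objects = c;
    }
}
#endif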
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
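
#if 0
/* Sketch of the push-with-overflow pattern used by evacuate() below for
 * a closure q in a compacted step: on overflow we note the fact and
 * reset the stack; the lost entries are recovered later by the linear
 * re-scan in scavenge_mark_stack().
 */
if (mark_stack_full()) {
    mark_stack_overflowed = rtsTrue;
    reset_mark_stack();
}
push_mark_stack((StgPtr)q);
#endif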
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
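
#if 0
/* Illustrative sketch of the destination rule above for an object in
 * generation g, step s, when collecting up to generation N.  This is a
 * hypothetical helper: in the real code the decision is precomputed in
 * step->to and applied by evacuate()/copy().
 */
static step *
example_dest_step (nat g, nat s)
{
    if (s+1 < generations[g].n_steps) {
	return &generations[g].steps[s+1];	// next higher step, same gen
    } else if (g+1 < RtsFlags.GcFlags.generations) {
	return &generations[g+1].steps[0];	// first step of next gen
    } else {
	return &generations[g].steps[s];	// oldest generation: stays put
    }
}
#endif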
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
  int gen, st;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgPtr bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
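
#if 0
/* Sketch of how a mark bit is found from a pointer and its block
 * descriptor (the real is_marked()/mark() live in GCCompact.h; the
 * details here are an illustrative reconstruction).  One mark bit per
 * word of the block:
 */
static rtsBool
example_is_marked (StgPtr p, bdescr *bd)
{
    nat offset = p - bd->start;		// word offset within the block
    StgWord bits = *((StgPtr)bd->u.bitmap + offset / (sizeof(W_)*BITS_PER_BYTE));
    return ((bits & ((StgWord)1 << (offset & (sizeof(W_)*BITS_PER_BYTE - 1)))) != 0);
}
#endif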
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  // Do the mut-once lists first
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	(mark_stack_bdescr != NULL && !mark_stack_empty())) {
	scavenge_mark_stack();
	flag = rtsTrue;
    }

    // scavenge each step in generations 0..maxgen
    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
      for (st = generations[gen].n_steps; --st >= 0; ) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	  continue;
	}
	stp = &generations[gen].steps[st];
	evac_gen = gen;
	if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	  scavenge(stp);
	  flag = rtsTrue;
	}
	if (stp->new_large_objects != NULL) {
	  scavenge_large(stp);
	  flag = rtsTrue;
	}
      }
    }

    if (flag) { goto loop; }

    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = (RtsFlags.GcFlags.pcFreeHeap * max) / 200;

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {
	  if (RtsFlags.GcFlags.compact) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
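
      /* Worked example (illustrative numbers): with gens = 2, compaction
       * off, max = 4096 blocks and min_alloc = 512, the test above is
       * size*2 + 512 > 4096, so size is capped at (4096-512)/2 = 1792
       * blocks: room for both from-space and to-space of the old
       * generation while leaving min_alloc for the allocation area. */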
      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.compact &&
	  oldest_gen->steps[0].n_blocks >
	  (RtsFlags.GcFlags.compactThreshold * max) / 100) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2).
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes: an L-sized nursery, plus L of
     * live data and another L of to-space to copy it into.  We can
     * reduce the size of the nursery to bring the required memory
     * down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
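
      /* (Derivation sketch: if the allocation area is B blocks and about
       * g0s0_pcnt_kept% of it survives the next minor GC, the heap at
       * that point holds roughly needed + B + B*g0s0_pcnt_kept/100
       * blocks; setting that equal to the suggestion and solving for B
       * gives the formula above.) */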
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
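
#if 0
/* Shape of the fixed point this function participates in (cf. the
 * scavenge loop in GarbageCollect() above); scavenge_until_no_work is a
 * hypothetical name for that loop:
 */
do {
    scavenge_until_no_work();
} while (traverse_weak_ptr_list());	// rtsTrue => a new key was found alive
#endif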
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default: ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
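
#if 0
/* Typical use (cf. traverse_weak_ptr_list above), for some StgWeak *w
 * on the old weak-pointer list:
 */
StgClosure *new = isAlive(w->key);
if (new != NULL) {
    w->key = new;	// still reachable: remember where it moved to
} else {
    /* dead: leave w for the pending-finalizer list */
}
#endif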
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
	if (bd->flags & BF_EVACUATED) {
	    return p;
	} else {
	    return NULL;
	}
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

/* Chain a new to-space block onto a step.  (The helper's definition was
 * elided here; the name addBlock and body below are a reconstruction of
 * the function used by copy()/copyPart()/mkMutCons().)
 */
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->to_blocks = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
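
#if 0
/* Sketch of the call-site pattern (cf. the end of scavenge() below):
 * if some field of the object at q could not be promoted into
 * evac_gen, defer by boxing q in a MUT_CONS on the older generation's
 * mut_once list:
 */
if (failed_to_evac) {
    failed_to_evac = rtsFalse;
    mkMutCons((StgClosure *)q, &generations[evac_gen]);
}
#endif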
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     then copy to M
           else          evac to step->to
   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO &&
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
	  bd->step->no != 0 &&
	  bd->step->no == generations[bd->gen_no].n_steps-1) {
	  stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
      return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
      return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
      to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
	case CONSTR:
	case CONSTR_1_0:
	case CONSTR_0_1:
	case CONSTR_2_0:
	case CONSTR_1_1:
	case CONSTR_0_2:
	case CONSTR_STATIC:
	  {
	    StgWord offset = info->layout.selector_offset;

	    // check that the size is in range
	    ASSERT(offset <
		   (StgWord32)(selectee_info->layout.payload.ptrs +
			       selectee_info->layout.payload.nptrs));

	    // perform the selection!
	    q = selectee->payload[offset];

	    /* if we're already in to-space, there's no need to continue
	     * with the evacuation, just update the source address with
	     * a pointer to the (evacuated) constructor field.
	     */
	    if (HEAP_ALLOCED(q)) {
	      bdescr *bd = Bdescr((P_)q);
	      if (bd->flags & BF_EVACUATED) {
		if (bd->gen_no < evac_gen) {
		  failed_to_evac = rtsTrue;
		  TICK_GC_FAILED_PROMOTION();
		}
		return q;
	      }
	    }

	    /* otherwise, carry on and evacuate this constructor field,
	     * (but not the constructor itself)
	     */
	    goto loop;
	  }

	case IND:
	case IND_STATIC:
	case IND_PERM:
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;

	case EVACUATED:
	  selectee = ((StgEvacuated *)selectee)->evacuee;
	  goto selector_loop;

	case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if (!bd->flags & BF_EVACUATED) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
#endif
	  // otherwise, fall through...

	case AP_UPD:
	case THUNK:
	case THUNK_1_0:
	case THUNK_0_1:
	case THUNK_2_0:
	case THUNK_1_1:
	case THUNK_0_2:
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case BLACKHOLE_BQ:
	  // not evaluated yet
	  break;

#if defined(PAR)
	// a copy of the top-level cases below
	case RBH: // cf. BLACKHOLE_BQ
	  {
	    //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	    to = copy(q,BLACKHOLE_sizeW(),stp);
	    //ToDo: derive size etc from reverted IP
	    //to = copy(q,size,stp);
	    // recordMutable((StgMutClosure *)to);
	    return to;
	  }

	case BLOCKED_FETCH:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	  to = copy(q,sizeofW(StgBlockedFetch),stp);
	  return to;

	case FETCH_ME:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMe),stp);
	  return to;

	case FETCH_ME_BQ:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	  return to;
#endif

	default:
	  barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	       (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	  StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	  move_TSO(tso, new_tso);
	  return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
    }

    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case FUN_0_2:
    case THUNK_0_2:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN_1_1:
    case THUNK_1_1:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case IND_PERM:
	if (stp->gen_no != 0) {
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
	}
	// fall through
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
    {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	    evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
    }

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
    }

    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
    }

    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
    }

    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }

    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
2352 scavenge_mark_stack(void)
2358 evac_gen = oldest_gen->no;
2359 saved_evac_gen = evac_gen;
2362 while (!mark_stack_empty()) {
2363 p = pop_mark_stack();
2365 info = get_itbl((StgClosure *)p);
2366 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2369 switch (info->type) {
2372 /* treat MVars specially, because we don't want to evacuate the
2373 * mut_link field in the middle of the closure.
2376 StgMVar *mvar = ((StgMVar *)p);
2378 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2379 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2380 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2381 evac_gen = saved_evac_gen;
2382 failed_to_evac = rtsFalse; // mutable.
2390 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2391 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2401 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2426 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2427 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2428 (StgClosure *)*p = evacuate((StgClosure *)*p);
2434 // don't need to do anything here: the only possible case
2435 // is that we're in a 1-space compacting collector, with
2436 // no "old" generation.
2440 case IND_OLDGEN_PERM:
2441 ((StgIndOldGen *)p)->indirectee =
2442 evacuate(((StgIndOldGen *)p)->indirectee);
2443 if (failed_to_evac) {
2444 recordOldToNewPtrs((StgMutClosure *)p);
2446 failed_to_evac = rtsFalse;
2451 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2452 evac_gen = saved_evac_gen;
2453 failed_to_evac = rtsFalse;
2458 failed_to_evac = rtsFalse;
2462 case SE_CAF_BLACKHOLE:
2470 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2471 (StgClosure *)bh->blocking_queue =
2472 evacuate((StgClosure *)bh->blocking_queue);
2473 failed_to_evac = rtsFalse;
2477 case THUNK_SELECTOR:
2479 StgSelector *s = (StgSelector *)p;
2480 s->selectee = evacuate(s->selectee);
2484 case AP_UPD: // same as PAPs
2486 /* Treat a PAP just like a section of stack, not forgetting to
2487 * evacuate the function pointer too...
2490 StgPAP* pap = (StgPAP *)p;
2492 pap->fun = evacuate(pap->fun);
2493 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2498 // follow everything
2502 evac_gen = 0; // repeatedly mutable
2503 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2504 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2505 (StgClosure *)*p = evacuate((StgClosure *)*p);
2507 evac_gen = saved_evac_gen;
2508 failed_to_evac = rtsFalse; // mutable anyhow.
2512 case MUT_ARR_PTRS_FROZEN:
2513 // follow everything
2517 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2518 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2519 (StgClosure *)*p = evacuate((StgClosure *)*p);
2526 StgTSO *tso = (StgTSO *)p;
2529 evac_gen = saved_evac_gen;
2530 failed_to_evac = rtsFalse;
2535 case RBH: // cf. BLACKHOLE_BQ
2538 nat size, ptrs, nonptrs, vhs;
2540 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2542 StgRBH *rbh = (StgRBH *)p;
2543 (StgClosure *)rbh->blocking_queue =
2544 evacuate((StgClosure *)rbh->blocking_queue);
2545 recordMutable((StgMutClosure *)rbh);
2546 failed_to_evac = rtsFalse; // mutable anyhow.
2548 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2549 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2555 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2556 // follow the pointer to the node which is being demanded
2557 (StgClosure *)bf->node =
2558 evacuate((StgClosure *)bf->node);
2559 // follow the link to the rest of the blocking queue
2560 (StgClosure *)bf->link =
2561 evacuate((StgClosure *)bf->link);
2562 if (failed_to_evac) {
2563 failed_to_evac = rtsFalse;
2564 recordMutable((StgMutClosure *)bf);
2567 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2568 bf, info_type((StgClosure *)bf),
2569 bf->node, info_type(bf->node)));
2577 break; // nothing to do in this case
2579 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2581 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2582 (StgClosure *)fmbq->blocking_queue =
2583 evacuate((StgClosure *)fmbq->blocking_queue);
2584 if (failed_to_evac) {
2585 failed_to_evac = rtsFalse;
2586 recordMutable((StgMutClosure *)fmbq);
2589 belch("@@ scavenge: %p (%s) exciting, isn't it",
2590 p, info_type((StgClosure *)p)));
2596 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2600 if (failed_to_evac) {
2601 failed_to_evac = rtsFalse;
2602 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2605 // mark the next bit to indicate "scavenged"
2606 mark(q+1, Bdescr(q));
2608 } // while (!mark_stack_empty())
2610 // start a new linear scan if the mark stack overflowed at some point
2611 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2612 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2613 mark_stack_overflowed = rtsFalse;
2614 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2615 oldgen_scan = oldgen_scan_bd->start;
2618 if (oldgen_scan_bd) {
2619 // push a new thing on the mark stack
2621 // find a closure that is marked but not scavenged, and start
2622 // scavenging it.
2623 while (oldgen_scan < oldgen_scan_bd->free
2624 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2628 if (oldgen_scan < oldgen_scan_bd->free) {
2630 // already scavenged?
2631 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2632 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2633 continue;
2634 }
2635 push_mark_stack(oldgen_scan);
2636 // ToDo: bump the linear scan by the actual size of the object
2637 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2641 oldgen_scan_bd = oldgen_scan_bd->link;
2642 if (oldgen_scan_bd != NULL) {
2643 oldgen_scan = oldgen_scan_bd->start;
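/* A minimal sketch (hypothetical helper, not used by the code above):
 * how the two mark bits per object are read back during the linear
 * scan. The bit at the object's own address means "marked (live)";
 * the bit at the following word means "already scavenged" (set by the
 * mark(q+1, ...) call earlier in this function). */
#if 0
static rtsBool
needs_scavenging( StgPtr q, bdescr *bd )
{
    // live, but its pointers have not been followed yet
    return is_marked(q, bd) && !is_marked(q+1, bd);
}
#endif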
2649 /* -----------------------------------------------------------------------------
2650 Scavenge one object.
2652 This is used for objects that are temporarily marked as mutable
2653 because they contain old-to-new generation pointers. Only certain
2654 objects can have this property.
2655 -------------------------------------------------------------------------- */
2657 static rtsBool
2658 scavenge_one(StgPtr p)
2659 {
2660 const StgInfoTable *info;
2661 nat saved_evac_gen = evac_gen;
2662 rtsBool no_luck;
2664 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2665 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2667 info = get_itbl((StgClosure *)p);
2669 switch (info->type) {
2672 case FUN_1_0: // hardly worth specialising these guys
2692 case IND_OLDGEN_PERM:
2696 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2697 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2698 (StgClosure *)*q = evacuate((StgClosure *)*q);
2704 case SE_CAF_BLACKHOLE:
2709 case THUNK_SELECTOR:
2711 StgSelector *s = (StgSelector *)p;
2712 s->selectee = evacuate(s->selectee);
2717 // nothing to follow
2722 // follow everything
2725 evac_gen = 0; // repeatedly mutable
2726 recordMutable((StgMutClosure *)p);
2727 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2728 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2729 (StgClosure *)*p = evacuate((StgClosure *)*p);
2731 evac_gen = saved_evac_gen;
2732 failed_to_evac = rtsFalse;
2736 case MUT_ARR_PTRS_FROZEN:
2738 // follow everything
2741 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2742 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2743 (StgClosure *)*p = evacuate((StgClosure *)*p);
2750 StgTSO *tso = (StgTSO *)p;
2752 evac_gen = 0; // repeatedly mutable
2754 recordMutable((StgMutClosure *)tso);
2755 evac_gen = saved_evac_gen;
2756 failed_to_evac = rtsFalse;
2763 StgPAP* pap = (StgPAP *)p;
2764 pap->fun = evacuate(pap->fun);
2765 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2770 // This might happen if, for instance, a MUT_CONS was pointing to a
2771 // THUNK which has since been updated. The IND_OLDGEN will
2772 // be on the mutable list anyway, so we don't need to do anything here.
2777 barf("scavenge_one: strange object %d", (int)(info->type));
2780 no_luck = failed_to_evac;
2781 failed_to_evac = rtsFalse;
2782 return (no_luck);
2783 }
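/* Sketch of how callers consume scavenge_one()'s result (assumption:
 * p points to an object being treated as mutable because it may hold
 * old-to-new pointers). A rtsTrue result means some pointee stayed in
 * a younger generation, so the object must remain a root; this mirrors
 * the MUT_CONS and large-object call sites later in this file. */
#if 0
static void
rescavenge_mutable( StgPtr p, generation *gen )
{
    if (scavenge_one(p)) {
        // not everything could be promoted: keep p as a root
        mkMutCons((StgClosure *)p, gen);
    }
}
#endif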
2785 /* -----------------------------------------------------------------------------
2786 Scavenging mutable lists.
2788 We treat the mutable list of each generation > N (i.e. all the
2789 generations older than the one being collected) as roots. We also
2790 remove non-mutable objects from the mutable list at this point.
2791 -------------------------------------------------------------------------- */
2793 static void
2794 scavenge_mut_once_list(generation *gen)
2795 {
2796 const StgInfoTable *info;
2797 StgMutClosure *p, *next, *new_list;
2799 p = gen->mut_once_list;
2800 new_list = END_MUT_LIST;
2801 next = p->mut_link;
2803 evac_gen = gen->no;
2804 failed_to_evac = rtsFalse;
2806 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2808 // make sure the info pointer points into text space
2809 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2810 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2814 if (info->type==RBH)
2815 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2817 switch(info->type) {
2820 case IND_OLDGEN_PERM:
2822 /* Try to pull the indirectee into this generation, so we can
2823 * remove the indirection from the mutable list.
2825 ((StgIndOldGen *)p)->indirectee =
2826 evacuate(((StgIndOldGen *)p)->indirectee);
2828 #if 0 && defined(DEBUG)
2829 if (RtsFlags.DebugFlags.gc)
2830 /* Debugging code to print out the size of the thing we just
2831 * promoted */
2832 {
2834 StgPtr start = gen->steps[0].scan;
2835 bdescr *start_bd = gen->steps[0].scan_bd;
2837 scavenge(&gen->steps[0]);
2838 if (start_bd != gen->steps[0].scan_bd) {
2839 size += (P_)BLOCK_ROUND_UP(start) - start;
2840 start_bd = start_bd->link;
2841 while (start_bd != gen->steps[0].scan_bd) {
2842 size += BLOCK_SIZE_W;
2843 start_bd = start_bd->link;
2845 size += gen->steps[0].scan -
2846 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2848 size = gen->steps[0].scan - start;
2850 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2854 /* failed_to_evac might happen if we've got more than two
2855 * generations, we're collecting only generation 0, the
2856 * indirection resides in generation 2 and the indirectee is
2857 * in generation 1. */
2859 if (failed_to_evac) {
2860 failed_to_evac = rtsFalse;
2861 p->mut_link = new_list;
2862 new_list = p;
2864 /* the mut_link field of an IND_STATIC is overloaded as the
2865 * static link field too (it just so happens that we don't need
2866 * both at the same time), so we need to NULL it out when
2867 * removing this object from the mutable list because the static
2868 * link fields are all assumed to be NULL before doing a major GC. */
2876 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2877 * it from the mutable list if possible, by promoting whatever it
2878 * points to. */
2880 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2881 /* didn't manage to promote everything, so put the
2882 * MUT_CONS back on the list.
2884 p->mut_link = new_list;
2885 new_list = p;
2890 // shouldn't have anything else on the mutables list
2891 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2895 gen->mut_once_list = new_list;
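/* The loop above is a destructive list filter: read mut_link before
 * the cell is relinked, and cons survivors onto a fresh list. A
 * standalone sketch of the same idiom (the still_mutable predicate is
 * hypothetical; the real code decides per closure type): */
#if 0
static StgMutClosure *
filter_mut_list( StgMutClosure *list,
                 rtsBool (*still_mutable)(StgMutClosure *) )
{
    StgMutClosure *p, *next, *new_list = END_MUT_LIST;

    for (p = list; p != END_MUT_LIST; p = next) {
        next = p->mut_link;             // read before we overwrite it
        if (still_mutable(p)) {
            p->mut_link = new_list;     // cons onto the filtered list
            new_list = p;
        }
    }
    return new_list;
}
#endif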
2899 static void
2900 scavenge_mutable_list(generation *gen)
2901 {
2902 const StgInfoTable *info;
2903 StgMutClosure *p, *next;
2905 p = gen->saved_mut_list;
2906 next = p->mut_link;
2908 evac_gen = 0;
2909 failed_to_evac = rtsFalse;
2911 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2913 // make sure the info pointer points into text space
2914 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2915 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2919 if (info->type==RBH)
2920 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2922 switch(info->type) {
2925 // follow everything
2926 p->mut_link = gen->mut_list;
2927 gen->mut_list = p;
2931 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2932 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2933 (StgClosure *)*q = evacuate((StgClosure *)*q);
2938 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2939 case MUT_ARR_PTRS_FROZEN:
2944 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2945 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2946 (StgClosure *)*q = evacuate((StgClosure *)*q);
2950 if (failed_to_evac) {
2951 failed_to_evac = rtsFalse;
2952 mkMutCons((StgClosure *)p, gen);
2958 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2959 p->mut_link = gen->mut_list;
2960 gen->mut_list = p;
2965 StgMVar *mvar = (StgMVar *)p;
2966 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2967 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2968 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2969 p->mut_link = gen->mut_list;
2970 gen->mut_list = p;
2976 StgTSO *tso = (StgTSO *)p;
2980 /* Don't take this TSO off the mutable list - it might still
2981 * point to some younger objects (because we set evac_gen to 0
2982 * above). */
2984 tso->mut_link = gen->mut_list;
2985 gen->mut_list = (StgMutClosure *)tso;
2991 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2992 (StgClosure *)bh->blocking_queue =
2993 evacuate((StgClosure *)bh->blocking_queue);
2994 p->mut_link = gen->mut_list;
2995 gen->mut_list = p;
2999 /* Happens if a BLACKHOLE_BQ in the old generation is updated: */
3002 case IND_OLDGEN_PERM:
3003 /* Try to pull the indirectee into this generation, so we can
3004 * remove the indirection from the mutable list.
3007 ((StgIndOldGen *)p)->indirectee =
3008 evacuate(((StgIndOldGen *)p)->indirectee);
3011 if (failed_to_evac) {
3012 failed_to_evac = rtsFalse;
3013 p->mut_link = gen->mut_once_list;
3014 gen->mut_once_list = p;
3021 // HWL: check whether all of these are necessary
3023 case RBH: // cf. BLACKHOLE_BQ
3025 // nat size, ptrs, nonptrs, vhs;
3027 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3028 StgRBH *rbh = (StgRBH *)p;
3029 (StgClosure *)rbh->blocking_queue =
3030 evacuate((StgClosure *)rbh->blocking_queue);
3031 if (failed_to_evac) {
3032 failed_to_evac = rtsFalse;
3033 recordMutable((StgMutClosure *)rbh);
3035 // ToDo: use size of reverted closure here!
3036 p += BLACKHOLE_sizeW();
3042 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3043 // follow the pointer to the node which is being demanded
3044 (StgClosure *)bf->node =
3045 evacuate((StgClosure *)bf->node);
3046 // follow the link to the rest of the blocking queue
3047 (StgClosure *)bf->link =
3048 evacuate((StgClosure *)bf->link);
3049 if (failed_to_evac) {
3050 failed_to_evac = rtsFalse;
3051 recordMutable((StgMutClosure *)bf);
3053 p += sizeofW(StgBlockedFetch);
3059 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3062 p += sizeofW(StgFetchMe);
3063 break; // nothing to do in this case
3065 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3067 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3068 (StgClosure *)fmbq->blocking_queue =
3069 evacuate((StgClosure *)fmbq->blocking_queue);
3070 if (failed_to_evac) {
3071 failed_to_evac = rtsFalse;
3072 recordMutable((StgMutClosure *)fmbq);
3074 p += sizeofW(StgFetchMeBlockingQueue);
3080 // shouldn't have anything else on the mutables list
3081 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3087 static void
3088 scavenge_static(void)
3089 {
3090 StgClosure* p = static_objects;
3091 const StgInfoTable *info;
3093 /* Always evacuate straight to the oldest generation for static
3094 * objects */
3095 evac_gen = oldest_gen->no;
3097 /* keep going until we've scavenged all the objects on the linked
3098 * list... */
3099 while (p != END_OF_STATIC_LIST) {
3103 if (info->type==RBH)
3104 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3106 // make sure the info pointer points into text space
3107 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3108 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3110 /* Take this object *off* the static_objects list,
3111 * and put it on the scavenged_static_objects list.
3113 static_objects = STATIC_LINK(info,p);
3114 STATIC_LINK(info,p) = scavenged_static_objects;
3115 scavenged_static_objects = p;
3117 switch (info -> type) {
3121 StgInd *ind = (StgInd *)p;
3122 ind->indirectee = evacuate(ind->indirectee);
3124 /* might fail to evacuate it, in which case we have to pop it
3125 * back on the mutable list (and take it off the
3126 * scavenged_static list because the static link and mut link
3127 * pointers are one and the same).
3129 if (failed_to_evac) {
3130 failed_to_evac = rtsFalse;
3131 scavenged_static_objects = IND_STATIC_LINK(p);
3132 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3133 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3147 next = (P_)p->payload + info->layout.payload.ptrs;
3148 // evacuate the pointers
3149 for (q = (P_)p->payload; q < next; q++) {
3150 (StgClosure *)*q = evacuate((StgClosure *)*q);
3156 barf("scavenge_static: strange closure %d", (int)(info->type));
3159 ASSERT(failed_to_evac == rtsFalse);
3161 /* get the next static object from the list. Remember, there might
3162 * be more stuff on this list now that we've done some evacuating!
3163 * (static_objects is a global)
3164 */
3165 p = static_objects;
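/* Why each object is unlinked *before* being scavenged: evacuate() may
 * push newly reachable static closures onto the global static_objects
 * list, so the list head must be re-read every iteration. Sketch of
 * the transfer step in isolation (mirrors the code above): */
#if 0
static void
move_to_scavenged_list( StgClosure *p )
{
    const StgInfoTable *info = get_itbl(p);
    static_objects = STATIC_LINK(info, p);      // pop the pending list
    STATIC_LINK(info, p) = scavenged_static_objects;
    scavenged_static_objects = p;               // push the done list
}
#endif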
3169 /* -----------------------------------------------------------------------------
3170 scavenge_stack walks over a section of stack and evacuates all the
3171 objects pointed to by it. We can use the same code for walking
3172 PAPs, since these are just sections of copied stack.
3173 -------------------------------------------------------------------------- */
3175 static void
3176 scavenge_stack(StgPtr p, StgPtr stack_end)
3177 {
3178 StgPtr q;
3179 const StgInfoTable* info;
3180 StgWord32 bitmap;
3182 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3185 * Each time around this loop, we are looking at a chunk of stack
3186 * that starts with either a pending argument section or an
3187 * activation record.
3190 while (p < stack_end) {
3193 // If we've got a tag, skip over that many words on the stack
3194 if (IS_ARG_TAG((W_)q)) {
3199 /* Is q a pointer to a closure? */
3201 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3203 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3204 ASSERT(closure_STATIC((StgClosure *)q));
3206 // otherwise, must be a pointer into the allocation space.
3209 (StgClosure *)*p = evacuate((StgClosure *)q);
3215 * Otherwise, q must be the info pointer of an activation
3216 * record. All activation records have 'bitmap' style layout
3217 * info. */
3219 info = get_itbl((StgClosure *)p);
3221 switch (info->type) {
3223 // Dynamic bitmap: the mask is stored on the stack
3225 bitmap = ((StgRetDyn *)p)->liveness;
3226 p = (P_)&((StgRetDyn *)p)->payload[0];
3229 // probably a slow-entry point return address:
3237 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3238 old_p, p, old_p+1));
3240 p++; // what if FHS!=1 !? -- HWL
3245 /* Specialised code for update frames, since they're so common.
3246 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3247 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3251 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3253 p += sizeofW(StgUpdateFrame);
3256 frame->updatee = evacuate(frame->updatee);
3258 #else // specialised code for update frames, not sure if it's worth it.
3260 nat type = get_itbl(frame->updatee)->type;
3262 if (type == EVACUATED) {
3263 frame->updatee = evacuate(frame->updatee);
3266 bdescr *bd = Bdescr((P_)frame->updatee);
3268 if (bd->gen_no > N) {
3269 if (bd->gen_no < evac_gen) {
3270 failed_to_evac = rtsTrue;
3275 // Don't promote blackholes
3277 if (!(stp->gen_no == 0 &&
3279 stp->no == stp->gen->n_steps-1)) {
3286 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3287 sizeofW(StgHeader), stp);
3288 frame->updatee = to;
3291 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3292 frame->updatee = to;
3293 recordMutable((StgMutClosure *)to);
3296 /* will never be SE_{,CAF_}BLACKHOLE, since we
3297 don't push an update frame for single-entry thunks. KSW 1999-01. */
3298 barf("scavenge_stack: UPDATE_FRAME updatee");
3304 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3311 bitmap = info->layout.bitmap;
3313 // this assumes that the payload starts immediately after the info-ptr
3315 while (bitmap != 0) {
3316 if ((bitmap & 1) == 0) {
3317 (StgClosure *)*p = evacuate((StgClosure *)*p);
3318 }
3319 p++;
3320 bitmap = bitmap >> 1;
3327 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3332 StgLargeBitmap *large_bitmap;
3335 large_bitmap = info->layout.large_bitmap;
3338 for (i=0; i<large_bitmap->size; i++) {
3339 bitmap = large_bitmap->bitmap[i];
3340 q = p + BITS_IN(W_);
3341 while (bitmap != 0) {
3342 if ((bitmap & 1) == 0) {
3343 (StgClosure *)*p = evacuate((StgClosure *)*p);
3344 }
3345 p++;
3346 bitmap = bitmap >> 1;
3348 if (i+1 < large_bitmap->size) {
3350 (StgClosure *)*p = evacuate((StgClosure *)*p);
3356 // and don't forget to follow the SRT
3361 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3366 /*-----------------------------------------------------------------------------
3367 scavenge the large object list.
3369 evac_gen set by caller; similar games played with evac_gen as with
3370 scavenge() - see comment at the top of scavenge(). Most large
3371 objects are (repeatedly) mutable, so most of the time evac_gen will
3372 be zero.
3373 --------------------------------------------------------------------------- */
3375 static void
3376 scavenge_large(step *stp)
3377 {
3378 bdescr *bd;
3379 StgPtr p;
3381 bd = stp->new_large_objects;
3383 for (; bd != NULL; bd = stp->new_large_objects) {
3385 /* take this object *off* the large objects list and put it on
3386 * the scavenged large objects list. This is so that we can
3387 * treat new_large_objects as a stack and push new objects on
3388 * the front when evacuating.
3390 stp->new_large_objects = bd->link;
3391 dbl_link_onto(bd, &stp->scavenged_large_objects);
3393 // update the block count in this step.
3394 stp->n_scavenged_large_blocks += bd->blocks;
3396 p = bd->start;
3397 if (scavenge_one(p)) {
3398 mkMutCons((StgClosure *)p, stp->gen);
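/* new_large_objects is deliberately treated as a stack, so that large
 * objects evacuated *during* this loop are still seen by it - the same
 * trick as the static_objects list in scavenge_static(). Hypothetical
 * helper showing the pop-and-transfer step in isolation: */
#if 0
static bdescr *
pop_new_large_object( step *stp )
{
    bdescr *bd = stp->new_large_objects;
    if (bd != NULL) {
        stp->new_large_objects = bd->link;      // pop the head block
        dbl_link_onto(bd, &stp->scavenged_large_objects);
        stp->n_scavenged_large_blocks += bd->blocks;
    }
    return bd;
}
#endif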
3403 /* -----------------------------------------------------------------------------
3404 Initialising the static object & mutable lists
3405 -------------------------------------------------------------------------- */
3407 static void
3408 zero_static_object_list(StgClosure* first_static)
3409 {
3410 StgClosure *p, *link;
3412 const StgInfoTable *info;
3414 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3415 info = get_itbl(p);
3416 link = STATIC_LINK(info, p);
3417 STATIC_LINK(info,p) = NULL;
3421 /* This function is only needed because we share the mutable link
3422 * field with the static link field in an IND_STATIC, so we have to
3423 * zero the mut_link field before doing a major GC, which needs the
3424 * static link field.
3426 * It doesn't do any harm to zero all the mutable link fields on the
3427 * mutable list.
3430 static void
3431 zero_mutable_list( StgMutClosure *first )
3432 {
3433 StgMutClosure *next, *c;
3435 for (c = first; c != END_MUT_LIST; c = next) {
3436 next = c->mut_link;
3437 c->mut_link = NULL;
3438 }
3441 /* -----------------------------------------------------------------------------
3442 Reverting CAFs
3443 -------------------------------------------------------------------------- */
3445 void
3446 revertCAFs( void )
3447 {
3448 StgIndStatic *c;
3450 for (c = (StgIndStatic *)caf_list; c != NULL;
3451 c = (StgIndStatic *)c->static_link)
3453 c->header.info = c->saved_info;
3454 c->saved_info = NULL;
3455 // could, but not necessary: c->static_link = NULL;
3460 void
3461 scavengeCAFs( void )
3462 {
3463 StgIndStatic *c;
3465 evac_gen = 0;
3466 for (c = (StgIndStatic *)caf_list; c != NULL;
3467 c = (StgIndStatic *)c->static_link)
3469 c->indirectee = evacuate(c->indirectee);
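/* What reverting a single CAF amounts to (sketch; revertCAFs() above
 * does this for every closure on caf_list): restore the info pointer
 * saved when the CAF was first entered, so the closure looks
 * unevaluated again. */
#if 0
static void
revert_one_caf( StgIndStatic *c )
{
    c->header.info = c->saved_info;     // back to the original info table
    c->saved_info  = NULL;              // no saved info => not entered
}
#endif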
3473 /* -----------------------------------------------------------------------------
3474 Sanity code for CAF garbage collection.
3476 With DEBUG turned on, we manage a CAF list in addition to the SRT
3477 mechanism. After GC, we run down the CAF list and blackhole any
3478 CAFs which have been garbage collected. This means we get an error
3479 whenever the program tries to enter a garbage collected CAF.
3481 Any garbage collected CAFs are taken off the CAF list at the same
3482 time.
3483 -------------------------------------------------------------------------- */
3485 #if 0 && defined(DEBUG)
3492 const StgInfoTable *info;
3503 ASSERT(info->type == IND_STATIC);
3505 if (STATIC_LINK(info,p) == NULL) {
3506 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3508 SET_INFO(p,&stg_BLACKHOLE_info);
3509 p = STATIC_LINK2(info,p);
3513 pp = &STATIC_LINK2(info,p);
3520 // belch("%d CAFs live", i);
3525 /* -----------------------------------------------------------------------------
3526 Lazy black holing.
3528 Whenever a thread returns to the scheduler after possibly doing
3529 some work, we have to run down the stack and black-hole all the
3530 closures referred to by update frames.
3531 -------------------------------------------------------------------------- */
3533 static void
3534 threadLazyBlackHole(StgTSO *tso)
3535 {
3536 StgUpdateFrame *update_frame;
3537 StgBlockingQueue *bh;
3538 StgPtr stack_end;
3540 stack_end = &tso->stack[tso->stack_size];
3541 update_frame = tso->su;
3544 switch (get_itbl(update_frame)->type) {
3547 update_frame = ((StgCatchFrame *)update_frame)->link;
3551 bh = (StgBlockingQueue *)update_frame->updatee;
3553 /* if the thunk is already blackholed, it means we've also
3554 * already blackholed the rest of the thunks on this stack,
3555 * so we can stop early.
3557 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3558 * don't interfere with this optimisation.
3560 if (bh->header.info == &stg_BLACKHOLE_info) {
3564 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3565 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3566 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3567 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3569 SET_INFO(bh,&stg_BLACKHOLE_info);
3572 update_frame = update_frame->link;
3576 update_frame = ((StgSeqFrame *)update_frame)->link;
3582 barf("threadPaused");
3588 /* -----------------------------------------------------------------------------
3589 * Stack squeezing
3590 *
3591 * Code largely pinched from old RTS, then hacked to bits. We also do
3592 * lazy black holing here.
3594 * -------------------------------------------------------------------------- */
3596 static void
3597 threadSqueezeStack(StgTSO *tso)
3598 {
3599 lnat displacement = 0;
3600 StgUpdateFrame *frame;
3601 StgUpdateFrame *next_frame; // Temporally next
3602 StgUpdateFrame *prev_frame; // Temporally previous
3603 StgPtr bottom;
3604 rtsBool prev_was_update_frame;
3606 StgUpdateFrame *top_frame;
3607 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3609 void printObj( StgClosure *obj ); // from Printer.c
3611 top_frame = tso->su;
3614 bottom = &(tso->stack[tso->stack_size]);
3617 /* There must be at least one frame, namely the STOP_FRAME. */
3619 ASSERT((P_)frame < bottom);
3621 /* Walk down the stack, reversing the links between frames so that
3622 * we can walk back up as we squeeze from the bottom. Note that
3623 * next_frame and prev_frame refer to next and previous as they were
3624 * added to the stack, rather than the way we see them in this
3625 * walk. (It makes the next loop less confusing.)
3627 * Stop if we find an update frame pointing to a black hole
3628 * (see comment in threadLazyBlackHole()).
3629 */
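/* The walk below is an in-place pointer reversal of the update-frame
 * chain, so that the squeezing pass can climb the stack bottom-up. A
 * minimal sketch of the same technique on a NULL-terminated chain (the
 * real code terminates at the STOP_FRAME instead): */
#if 0
static StgUpdateFrame *
reverse_frame_chain( StgUpdateFrame *frame )
{
    StgUpdateFrame *up = NULL, *down;

    while (frame != NULL) {
        down = frame->link;     // the frame below us on the stack
        frame->link = up;       // make the link point upwards instead
        up = frame;
        frame = down;
    }
    return up;                  // the bottom-most frame
}
#endif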
3632 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3633 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3634 prev_frame = frame->link;
3635 frame->link = next_frame;
3640 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3641 printObj((StgClosure *)prev_frame);
3642 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3645 switch (get_itbl(frame)->type) {
3648 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3661 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3663 printObj((StgClosure *)prev_frame);
3666 if (get_itbl(frame)->type == UPDATE_FRAME
3667 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3672 /* Now, we're at the bottom. Frame points to the lowest update
3673 * frame on the stack, and its link actually points to the frame
3674 * above. We have to walk back up the stack, squeezing out empty
3675 * update frames and turning the pointers back around on the way
3678 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3679 * we never want to eliminate it anyway. Just walk one step up
3680 * before starting to squeeze. When you get to the topmost frame,
3681 * remember that there are still some words above it that might have
3682 * to be moved as well. */
3688 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3691 * Loop through all of the frames (everything except the very
3692 * bottom). Things are complicated by the fact that we have
3693 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3694 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3696 while (frame != NULL) {
3697 StgPtr sp;
3698 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3699 rtsBool is_update_frame;
3701 next_frame = frame->link;
3702 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3704 /* Check to see if:
3705 * 1. both the previous and current frame are update frames
3706 * 2. the current frame is empty
3707 */
3708 if (prev_was_update_frame && is_update_frame &&
3709 (P_)prev_frame == frame_bottom + displacement) {
3711 // Now squeeze out the current frame
3712 StgClosure *updatee_keep = prev_frame->updatee;
3713 StgClosure *updatee_bypass = frame->updatee;
3716 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3720 /* Deal with blocking queues. If both updatees have blocked
3721 * threads, then we should merge the queues into the update
3722 * frame that we're keeping.
3724 * Alternatively, we could just wake them up: they'll just go
3725 * straight to sleep on the proper blackhole! This is less code
3726 * and probably less bug prone, although it's probably much
3727 * slower. */
3729 #if 0 // do it properly...
3730 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3731 # error Unimplemented lazy BH warning. (KSW 1999-01)
3733 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3734 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3736 // Sigh. It has one. Don't lose those threads!
3737 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3738 // Urgh. Two queues. Merge them.
3739 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3741 while (keep_tso->link != END_TSO_QUEUE) {
3742 keep_tso = keep_tso->link;
3744 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3747 // For simplicity, just swap the BQ for the BH
3748 P_ temp = updatee_keep;
3750 updatee_keep = updatee_bypass;
3751 updatee_bypass = temp;
3753 // Record the swap in the kept frame (below)
3754 prev_frame->updatee = updatee_keep;
3759 TICK_UPD_SQUEEZED();
3760 /* wasn't there something about update squeezing and ticky to be
3761 * sorted out? oh yes: we aren't counting each enter properly
3762 * in this case. See the log somewhere. KSW 1999-04-21
3764 * Check two things: that the two update frames don't point to
3765 * the same object, and that the updatee_bypass isn't already an
3766 * indirection. Both of these cases only happen when we're in a
3767 * block hole-style loop (and there are multiple update frames
3768 * on the stack pointing to the same closure), but they can both
3769 * screw us up if we don't check.
3771 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3772 // this wakes the threads up
3773 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3776 sp = (P_)frame - 1; // sp = stuff to slide
3777 displacement += sizeofW(StgUpdateFrame);
3780 // No squeeze for this frame
3781 sp = frame_bottom - 1; // Keep the current frame
3783 /* Do lazy black-holing. */
3785 if (is_update_frame) {
3786 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3787 if (bh->header.info != &stg_BLACKHOLE_info &&
3788 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3789 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3790 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3791 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3794 /* zero out the slop so that the sanity checker can tell
3795 * where the next closure is.
3798 StgInfoTable *info = get_itbl(bh);
3799 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3800 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3801 * info is used for a different purpose, and it's exactly the
3802 * same size as a BLACKHOLE in any case.
3804 if (info->type != THUNK_SELECTOR) {
3805 for (i = np; i < np + nw; i++) {
3806 ((StgClosure *)bh)->payload[i] = 0;
3811 SET_INFO(bh,&stg_BLACKHOLE_info);
3815 // Fix the link in the current frame (should point to the frame below)
3816 frame->link = prev_frame;
3817 prev_was_update_frame = is_update_frame;
3820 // Now slide all words from sp up to the next frame
3822 if (displacement > 0) {
3823 P_ next_frame_bottom;
3825 if (next_frame != NULL)
3826 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3828 next_frame_bottom = tso->sp - 1;
3832 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3836 while (sp >= next_frame_bottom) {
3837 sp[displacement] = *sp;
3841 (P_)prev_frame = (P_)frame + displacement;
3845 tso->sp += displacement;
3846 tso->su = prev_frame;
3849 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3850 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
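/* The slide performed above, in isolation: every live word between two
 * frames moves up by `displacement` slots (towards the stack bottom),
 * copying from the highest address downwards so that no word is
 * overwritten before it has been read. Hypothetical helper: */
#if 0
static void
slide_words( StgPtr lo, StgPtr hi, nat displacement )
{
    StgPtr sp;
    for (sp = hi; sp >= lo; sp--) {
        sp[displacement] = *sp;         // copy towards higher addresses
    }
}
#endif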
3855 /* -----------------------------------------------------------------------------
3858 * We have to prepare for GC - this means doing lazy black holing
3859 * here. We also take the opportunity to do stack squeezing if it's
3860 * turned on.
3861 * -------------------------------------------------------------------------- */
3862 void
3863 threadPaused(StgTSO *tso)
3864 {
3865 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3866 threadSqueezeStack(tso); // does black holing too
3867 else
3868 threadLazyBlackHole(tso);
3871 /* -----------------------------------------------------------------------------
3872 * Debugging
3873 * -------------------------------------------------------------------------- */
3876 void
3877 printMutOnceList(generation *gen)
3878 {
3879 StgMutClosure *p, *next;
3881 p = gen->mut_once_list;
3882 next = p->mut_link;
3884 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3885 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3886 fprintf(stderr, "%p (%s), ",
3887 p, info_type((StgClosure *)p));
3889 fputc('\n', stderr);
3892 void
3893 printMutableList(generation *gen)
3894 {
3895 StgMutClosure *p, *next;
3897 p = gen->mut_list;
3898 next = p->mut_link;
3900 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3901 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3902 fprintf(stderr, "%p (%s), ",
3903 p, info_type((StgClosure *)p));
3905 fputc('\n', stderr);
3908 static inline rtsBool
3909 maybeLarge(StgClosure *closure)
3910 {
3911 StgInfoTable *info = get_itbl(closure);
3913 /* closure types that may be found on the new_large_objects list;
3914 see scavenge_large */
3915 return (info->type == MUT_ARR_PTRS ||
3916 info->type == MUT_ARR_PTRS_FROZEN ||
3917 info->type == TSO ||
3918 info->type == ARR_WORDS);
3919 }
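/* Sketch of the intended use of maybeLarge() (the real call sites live
 * in DEBUG-only sanity checks; this wrapper is hypothetical): */
#if 0
static void
assert_large_ok( StgClosure *c )
{
    // anything placed on a new_large_objects list must be of a type
    // that scavenge_large()/scavenge_one() can handle
    ASSERT(maybeLarge(c));
}
#endif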