/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.115 2001/08/07 10:49:49 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"        // for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"        // ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
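
/* The linking convention above, sketched as code (illustrative only;
 * the real thing is inlined in evacuate() below, and this helper is
 * hypothetical):
 */
#if 0
static void
add_to_static_list (const StgInfoTable *info, StgClosure *p)
{
  if (STATIC_LINK(info,p) == NULL) { // non-NULL means already on the list
    STATIC_LINK(info,p) = static_objects;
    static_objects = p;
  }
}
#endif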
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;  // also pending finaliser list
static rtsBool weak_done;    // all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          // blocks allocated during this GC
lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
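
// (A selector thunk is a closure like "case x of (a,_) -> a"; a long
// chain of them - e.g. nested fst/snd selections - would otherwise
// make evacuate() recurse once per link, hence the depth bound above.)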
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

static inline rtsBool
mark_stack_empty(void)
{
  return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
  return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
  mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
  *mark_sp++ = p;
}
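
// pop_mark_stack() is the counterpart used by scavenge_mark_stack()
// below; a sketch of the obvious inverse of push_mark_stack():
static inline StgPtr
pop_mark_stack(void)
{
  return *--mark_sp;
}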
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
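/* A sketch of the destination rule described above (illustrative
 * only - the real logic lives in evacuate() and copy() below, and
 * this helper is hypothetical):
 */
#if 0
static step *
dest_step (step *stp)
{
  if (stp->gen_no < evac_gen) {          // forced promotion
    return &generations[evac_gen].steps[0];
  }
  return stp->to;                        // next step, or first step of
                                         // the next generation
}
#endif
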
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED; // it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
        nat bitmap_size; // in bytes
        bdescr *bitmap_bdescr;
        StgPtr bitmap;
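        // one mark bit per word of heap: each block of BLOCK_SIZE
        // bytes needs BLOCK_SIZE / (sizeof(W_) * BITS_PER_BYTE)
        // bytes of bitmap, hence the calculation below.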
        bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

        if (bitmap_size > 0) {
          bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
                                     / BLOCK_SIZE);
          stp->bitmap = bitmap_bdescr;
          bitmap = bitmap_bdescr->start;

          IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
                             bitmap_size, bitmap););

          // don't forget to fill it with zeros!
          memset(bitmap, 0, bitmap_size);

          // for each block in this step, point to its bitmap from the
          // block descriptor.
          for (bd=stp->blocks; bd != NULL; bd = bd->link) {
            bd->u.bitmap = bitmap;
            bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
          }
        }
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
        ASSERT(stp->blocks == NULL);
        bd = allocBlock();
        bd->gen_no = g;
        bd->step = stp;
        bd->link = NULL;
        bd->flags = 0; // *not* a to-space block or a large object
        stp->hp = bd->start;
        stp->hpLim = stp->hp + BLOCK_SIZE_W;
        stp->hp_bd = bd;
        stp->blocks = bd;
        stp->n_blocks = 1;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;
    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
            goto loop2;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
            goto loop2;
          }
        }
      }
    }
    if (flag) { goto loop; }
    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        stp->hp_bd->free = stp->hp;
        stp->hp_bd->link = NULL;
      }
    }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
    // save number of blocks for stats
    oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
    compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp_bd->free;
        }
      }

      // for generations we collected...
      if (g <= N) {

        // rough calculation of garbage collected, for stats output
        if (stp->is_compacted) {
          collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
        } else {
          collected += stp->n_blocks * BLOCK_SIZE_W;
        }
        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          if (stp->is_compacted) {
            // for a compacted step, just shift the new to-space
            // onto the front of the now-compacted existing blocks.
            for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED; // now from-space
            }
            // tack the new blocks on the end of the existing blocks
            if (stp->blocks == NULL) {
              stp->blocks = stp->to_blocks;
            } else {
              for (bd = stp->blocks; bd != NULL; bd = next) {
                next = bd->link;
                if (next == NULL) {
                  bd->link = stp->to_blocks;
                }
              }
            }
            // add the new blocks to the block tally
            stp->n_blocks += stp->n_to_blocks;
          } else {
            freeChain(stp->blocks);
            stp->blocks = stp->to_blocks;
            stp->n_blocks = stp->n_to_blocks;
            for (bd = stp->blocks; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED; // now from-space
            }
          }
          stp->to_blocks = NULL;
          stp->n_to_blocks = 0;
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }

        // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->flags &= ~BF_EVACUATED;
        }
        stp->large_objects = stp->scavenged_large_objects;
        stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
        // for older generations...

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = stp->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->flags &= ~BF_EVACUATED;
          dbl_link_onto(bd, &stp->large_objects);
        }

        // add the new blocks we promoted during this GC
        stp->n_blocks += stp->n_to_blocks;
        stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = (RtsFlags.GcFlags.pcFreeHeap * max) / 200;

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (RtsFlags.GcFlags.compact) {
          if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
              size = (max - min_alloc) / ((gens - 1) * 2 - 1);
          }
      } else {
          if ( (size * (gens - 1) * 2) + min_alloc > max ) {
              size = (max - min_alloc) / ((gens - 1) * 2);
          }
      }
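      // (Roughly: a copying collection of an old generation needs
      // to-space as big as the generation itself, so each of the
      // gens-1 old generations may transiently need 2*size blocks;
      // with compaction the oldest generation needs no to-space,
      // which removes one 'size' worth of headroom from the bound.)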
#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
              min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.compact &&
          oldest_gen->steps[0].n_blocks >
          (RtsFlags.GcFlags.compactThreshold * max) / 100) {
          oldest_gen->steps[0].is_compacted = 1;
//        fprintf(stderr,"compaction: on\n");
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        fprintf(stderr,"compaction: off\n");
      }
  }
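  // (Illustrative: with the default compactThreshold of 30 and a
  // maximum heap of 65536 blocks - 256Mb at the usual 4k block
  // size - compaction switches on once the oldest generation
  // exceeds 19660 blocks.)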
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->is_compacted && stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
          }
      }
  }

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0; // now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks; // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
        (100 + (long)g0s0_pcnt_kept);
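      /* (Illustrative numbers: with suggested = 10000 blocks,
       * needed = 2000 and g0s0_pcnt_kept = 25, this gives
       * blocks = (10000 - 2000) * 100 / 125 = 6400.) */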
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
        t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
        next = t->link;
        *prev = next;
        continue;
      case ThreadKilled:
      case ThreadComplete:
        // finished or died.  The thread might still be alive, but we
        // don't keep it on the all_threads list.  Don't forget to
        // stub out its global_link field.
        next = t->global_link;
        t->global_link = END_TSO_QUEUE;
        *prev = next;
        continue;
      default:
        ;
      }

      if (tmp == NULL) {
        // not alive (yet): leave this thread on the old_all_threads list.
        prev = &(t->global_link);
        next = t->global_link;
      }
      else {
        // alive: move this thread onto the all_threads list.
        next = t->global_link;
        t->global_link = all_threads;
        all_threads = t;
        *prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return rtsTrue;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
    (StgClosure *)w = evacuate((StgClosure *)w);
    *last_w = w;
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
      return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
      if (bd->flags & BF_EVACUATED) {
        return p;
      } else {
        return NULL;
      }
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
      return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:        // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }
      // a TSO that hasn't been relocated is only alive if it was
      // evacuated, which is handled above; fall through.

    default:
      // dead.
      return NULL;
    }
  }
}

static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
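
/* (copy() is the workhorse used by evacuate() below for ordinary
 * closures; callers supply the closure's size in words, e.g.
 * copy(q, sizeW_fromITBL(info), stp).)
 */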
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
        // nope
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
          ((StgTSO *)q)->what_next == ThreadRelocated) {
        q = (StgClosure *)((StgTSO *)q)->link;
        goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
      if (!is_marked((P_)q,bd)) {
        mark((P_)q,bd);
        if (mark_stack_full()) {
          mark_stack_overflowed = rtsTrue;
          reset_mark_stack();
        }
        push_mark_stack((P_)q);
      }
      return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
        return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
        return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
    }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:        // here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord offset = info->layout.selector_offset;

          // check that the size is in range
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          // perform the selection!
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->flags & BF_EVACUATED) {
              if (bd->gen_no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;
        goto selector_loop;

      case EVACUATED:
        selectee = ((StgEvacuated *)selectee)->evacuee;
        goto selector_loop;

      case THUNK_SELECTOR:
#if 0
        /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
           something) to go into an infinite loop when the nightly
           stage2 compiles PrelTup.lhs. */

        /* we can't recurse indefinitely in evacuate(), so set a
         * limit on the number of times we can go around this
         * loop.
         */
        if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
          bdescr *bd;
          bd = Bdescr((P_)selectee);
          if (!(bd->flags & BF_EVACUATED)) {
            thunk_selector_depth++;
            selectee = evacuate(selectee);
            thunk_selector_depth--;
            goto selector_loop;
          }
        }
#endif
        // otherwise, fall through...
      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        // not evaluated yet
        break;

#if defined(PAR)
        // a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
        {
          //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
          to = copy(q,BLACKHOLE_sizeW(),stp);
          //ToDo: derive size etc from reverted IP
          //to = copy(q,size,stp);
          // recordMutable((StgMutClosure *)to);
          return to;
        }

      case BLOCKED_FETCH:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
        to = copy(q,sizeofW(StgBlockedFetch),stp);
        return to;

# ifdef DIST
      case REMOTE_REF:
# endif
      case FETCH_ME:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMe),stp);
        return to;

      case FETCH_ME_BQ:
        ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
        to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
        return to;
#endif

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
        && ((StgIndStatic *)q)->saved_info == NULL
        && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {        // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
        move_TSO(tso, new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
  ptrdiff_t diff;

  // relocate the stack pointers...
  diff = (StgPtr)dest - (StgPtr)src; // In *words*
  dest->sp = (StgPtr)dest->sp + diff;
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

  relocate_stack(dest, diff);
}

/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)mvar);
        failed_to_evac = rtsFalse; // mutable.
        p += sizeofW(StgMVar);
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }
    case IND_PERM:
      if (stp->gen_no != 0) {
        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        recordMutable((StgMutClosure *)bh);
        failed_to_evac = rtsFalse;
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }
    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      // follow everything
      {
        StgPtr next;

        evac_gen = 0; // repeatedly mutable
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)q);
        failed_to_evac = rtsFalse; // mutable anyhow.
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        // it's tempting to recordMutable() if failed_to_evac is
        // false, but that breaks some assumptions (eg. every
        // closure on the mutable list is supposed to have the MUT
        // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)tso);
        failed_to_evac = rtsFalse; // mutable anyhow.
        p += tso_sizeW(tso);
        break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        nat size, ptrs, nonptrs, vhs;
        char str[80];
        StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        recordMutable((StgMutClosure *)rbh);
        failed_to_evac = rtsFalse; // mutable anyhow.
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        // follow the pointer to the node which is being demanded
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        // follow the link to the rest of the blocking queue
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

#ifdef DIST
    case REMOTE_REF:
#endif
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif
    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
           info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}

/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {

	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	    {
		StgMVar *mvar = ((StgMVar *)p);
		evac_gen = 0;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable.
		break;
	    }

	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	    {
		StgPtr end;

		end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
		for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    failed_to_evac = rtsFalse;
	    break;

	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		failed_to_evac = rtsFalse;
		break;
	    }

	case THUNK_SELECTOR:
	    {
		StgSelector *s = (StgSelector *)p;
		s->selectee = evacuate(s->selectee);
		break;
	    }

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	    {
		StgPAP* pap = (StgPAP *)p;

		pap->fun = evacuate(pap->fun);
		scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
		break;
	    }
	case MUT_ARR_PTRS:
	    // follow everything
	    {
		StgPtr next;

		evac_gen = 0; // repeatedly mutable
		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable anyhow.
		break;
	    }

	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	    {
		StgPtr next;

		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }

	case TSO:
	    {
		StgTSO *tso = (StgTSO *)p;

		evac_gen = 0;
		scavengeTSO(tso);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse;
		break;
	    }

#if defined(PAR)
	case RBH: // cf. BLACKHOLE_BQ
	    {
		nat size, ptrs, nonptrs, vhs;
		char str[80];
		StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

		StgRBH *rbh = (StgRBH *)p;
		(StgClosure *)rbh->blocking_queue =
		    evacuate((StgClosure *)rbh->blocking_queue);
		recordMutable((StgMutClosure *)rbh);
		failed_to_evac = rtsFalse; // mutable anyhow.
		IF_DEBUG(gc,
			 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			       p, info_type(p), (StgClosure *)rbh->blocking_queue));
		break;
	    }
2552 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2553 // follow the pointer to the node which is being demanded
2554 (StgClosure *)bf->node =
2555 evacuate((StgClosure *)bf->node);
2556 // follow the link to the rest of the blocking queue
2557 (StgClosure *)bf->link =
2558 evacuate((StgClosure *)bf->link);
2559 if (failed_to_evac) {
2560 failed_to_evac = rtsFalse;
2561 recordMutable((StgMutClosure *)bf);
belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));
2574 break; // nothing to do in this case
2576 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2578 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2579 (StgClosure *)fmbq->blocking_queue =
2580 evacuate((StgClosure *)fmbq->blocking_queue);
2581 if (failed_to_evac) {
2582 failed_to_evac = rtsFalse;
2583 recordMutable((StgMutClosure *)fmbq);
2586 belch("@@ scavenge: %p (%s) exciting, isn't it",
2587 p, info_type((StgClosure *)p)));
2593 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2597 if (failed_to_evac) {
2598 failed_to_evac = rtsFalse;
2599 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2602 // mark the next bit to indicate "scavenged"
2603 mark(q+1, Bdescr(q));
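// (Convention: the mark bit for the first word of a closure, q, means
// "live"; the bit for the following word, q+1, means "already
// scavenged" - cf. the is_marked() tests in the linear scan below.)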
2605 } // while (!mark_stack_empty())
2607 // start a new linear scan if the mark stack overflowed at some point
2608 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2609 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2610 mark_stack_overflowed = rtsFalse;
2611 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2612 oldgen_scan = oldgen_scan_bd->start;
2615 if (oldgen_scan_bd) {
2616 // push a new thing on the mark stack
// find a closure that is marked but not scavenged, and start
// scavenging it.
2620 while (oldgen_scan < oldgen_scan_bd->free
2621 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2625 if (oldgen_scan < oldgen_scan_bd->free) {
2627 // already scavenged?
2628 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2629 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2632 push_mark_stack(oldgen_scan);
2633 // ToDo: bump the linear scan by the actual size of the object
2634 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
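// Bumping by the minimum closure size cannot overshoot: every
// closure occupies at least this many words, so we land at or
// before the next closure's start, and the word-by-word
// is_marked() search above then finds the next marked closure.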
2638 oldgen_scan_bd = oldgen_scan_bd->link;
2639 if (oldgen_scan_bd != NULL) {
2640 oldgen_scan = oldgen_scan_bd->start;
2646 /* -----------------------------------------------------------------------------
2647 Scavenge one object.
2649 This is used for objects that are temporarily marked as mutable
2650 because they contain old-to-new generation pointers. Only certain
2651 objects can have this property.
2652 -------------------------------------------------------------------------- */
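/* scavenge_one() returns rtsTrue if it failed to promote everything the
 * object points to, in which case the caller must keep the object on a
 * mutable list.  A minimal usage sketch (not compiled), modelled on the
 * callers below, scavenge_mut_once_list() and scavenge_large():
 */
#if 0
	if (scavenge_one(p)) {
	    // couldn't promote everything p points to: remember the
	    // old-to-new pointers by making a MUT_CONS for the object
	    mkMutCons((StgClosure *)p, gen);
	}
#endif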
2655 scavenge_one(StgPtr p)
2657 const StgInfoTable *info;
2658 nat saved_evac_gen = evac_gen;
2661 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2662 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2664 info = get_itbl((StgClosure *)p);
2666 switch (info->type) {
2669 case FUN_1_0: // hardly worth specialising these guys
2689 case IND_OLDGEN_PERM:
2693 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2694 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2695 (StgClosure *)*q = evacuate((StgClosure *)*q);
2701 case SE_CAF_BLACKHOLE:
2706 case THUNK_SELECTOR:
2708 StgSelector *s = (StgSelector *)p;
2709 s->selectee = evacuate(s->selectee);
2714 // nothing to follow
2719 // follow everything
2722 evac_gen = 0; // repeatedly mutable
2723 recordMutable((StgMutClosure *)p);
2724 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2725 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2726 (StgClosure *)*p = evacuate((StgClosure *)*p);
2728 evac_gen = saved_evac_gen;
2729 failed_to_evac = rtsFalse;
2733 case MUT_ARR_PTRS_FROZEN:
2735 // follow everything
2738 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2739 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2740 (StgClosure *)*p = evacuate((StgClosure *)*p);
2747 StgTSO *tso = (StgTSO *)p;
2749 evac_gen = 0; // repeatedly mutable
2751 recordMutable((StgMutClosure *)tso);
2752 evac_gen = saved_evac_gen;
2753 failed_to_evac = rtsFalse;
2760 StgPAP* pap = (StgPAP *)p;
2761 pap->fun = evacuate(pap->fun);
2762 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
// This might happen if, for instance, a MUT_CONS was pointing to a
// THUNK which has since been updated.  The IND_OLDGEN will be on
// the mutable list anyway, so we don't need to do anything here.
2774 barf("scavenge_one: strange object %d", (int)(info->type));
2777 no_luck = failed_to_evac;
2778 failed_to_evac = rtsFalse;
2782 /* -----------------------------------------------------------------------------
2783 Scavenging mutable lists.
2785 We treat the mutable list of each generation > N (i.e. all the
2786 generations older than the one being collected) as roots. We also
2787 remove non-mutable objects from the mutable list at this point.
2788 -------------------------------------------------------------------------- */
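/* Note the two flavours: mut_once_list holds objects that only need to
 * be looked at once more (e.g. IND_OLDGEN indirections), while mut_list
 * holds objects that remain (repeatedly) mutable.  The walk below
 * rebuilds the list cons-style; a simplified sketch (not compiled) of
 * that retention pattern, where still_mutable() is a hypothetical
 * stand-in for the per-closure-type logic:
 */
#if 0
	new_list = END_MUT_LIST;
	for (p = gen->mut_once_list; p != END_MUT_LIST; p = next) {
	    next = p->mut_link;
	    if (still_mutable(p)) {		// hypothetical predicate
		p->mut_link = new_list;		// keep: cons back onto new_list
		new_list = p;
	    }
	}
	gen->mut_once_list = new_list;
#endif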
2791 scavenge_mut_once_list(generation *gen)
2793 const StgInfoTable *info;
2794 StgMutClosure *p, *next, *new_list;
2796 p = gen->mut_once_list;
2797 new_list = END_MUT_LIST;
2801 failed_to_evac = rtsFalse;
2803 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
// make sure the info pointer points into text space
2806 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2807 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2811 if (info->type==RBH)
2812 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2814 switch(info->type) {
2817 case IND_OLDGEN_PERM:
2819 /* Try to pull the indirectee into this generation, so we can
* remove the indirection from the mutable list.
*/
2822 ((StgIndOldGen *)p)->indirectee =
2823 evacuate(((StgIndOldGen *)p)->indirectee);
2825 #if 0 && defined(DEBUG)
2826 if (RtsFlags.DebugFlags.gc)
/* Debugging code to print out the size of the thing we just
* evacuated
*/
2831 StgPtr start = gen->steps[0].scan;
2832 bdescr *start_bd = gen->steps[0].scan_bd;
2834 scavenge(&gen->steps[0]);
2835 if (start_bd != gen->steps[0].scan_bd) {
2836 size += (P_)BLOCK_ROUND_UP(start) - start;
2837 start_bd = start_bd->link;
2838 while (start_bd != gen->steps[0].scan_bd) {
2839 size += BLOCK_SIZE_W;
2840 start_bd = start_bd->link;
2842 size += gen->steps[0].scan -
2843 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2845 size = gen->steps[0].scan - start;
2847 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2851 /* failed_to_evac might happen if we've got more than two
2852 * generations, we're collecting only generation 0, the
* indirection resides in generation 2 and the indirectee is
* in generation 1.
*/
2856 if (failed_to_evac) {
2857 failed_to_evac = rtsFalse;
2858 p->mut_link = new_list;
2861 /* the mut_link field of an IND_STATIC is overloaded as the
2862 * static link field too (it just so happens that we don't need
2863 * both at the same time), so we need to NULL it out when
2864 * removing this object from the mutable list because the static
* link fields are all assumed to be NULL before doing a major
* collection.
*/
/* MUT_CONS is a kind of MUT_VAR, except that we try to remove
* it from the mutable list if possible by promoting whatever it
* points to.
*/
2877 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2878 /* didn't manage to promote everything, so put the
2879 * MUT_CONS back on the list.
2881 p->mut_link = new_list;
// shouldn't have anything else on the mutable list
2888 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2892 gen->mut_once_list = new_list;
2897 scavenge_mutable_list(generation *gen)
2899 const StgInfoTable *info;
2900 StgMutClosure *p, *next;
2902 p = gen->saved_mut_list;
2906 failed_to_evac = rtsFalse;
2908 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
// make sure the info pointer points into text space
2911 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2912 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2916 if (info->type==RBH)
2917 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2919 switch(info->type) {
2922 // follow everything
2923 p->mut_link = gen->mut_list;
2928 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2929 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2930 (StgClosure *)*q = evacuate((StgClosure *)*q);
2935 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2936 case MUT_ARR_PTRS_FROZEN:
2941 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2942 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2943 (StgClosure *)*q = evacuate((StgClosure *)*q);
2947 if (failed_to_evac) {
2948 failed_to_evac = rtsFalse;
2949 mkMutCons((StgClosure *)p, gen);
2955 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2956 p->mut_link = gen->mut_list;
2962 StgMVar *mvar = (StgMVar *)p;
2963 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2964 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2965 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2966 p->mut_link = gen->mut_list;
2973 StgTSO *tso = (StgTSO *)p;
2977 /* Don't take this TSO off the mutable list - it might still
* point to some younger objects (because we set evac_gen to 0
* above).
*/
2981 tso->mut_link = gen->mut_list;
2982 gen->mut_list = (StgMutClosure *)tso;
2988 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2989 (StgClosure *)bh->blocking_queue =
2990 evacuate((StgClosure *)bh->blocking_queue);
2991 p->mut_link = gen->mut_list;
/* Happens if a BLACKHOLE_BQ in the old generation is updated:
*/
2999 case IND_OLDGEN_PERM:
3000 /* Try to pull the indirectee into this generation, so we can
* remove the indirection from the mutable list.
*/
3004 ((StgIndOldGen *)p)->indirectee =
3005 evacuate(((StgIndOldGen *)p)->indirectee);
3008 if (failed_to_evac) {
3009 failed_to_evac = rtsFalse;
3010 p->mut_link = gen->mut_once_list;
3011 gen->mut_once_list = p;
3018 // HWL: check whether all of these are necessary
3020 case RBH: // cf. BLACKHOLE_BQ
3022 // nat size, ptrs, nonptrs, vhs;
3024 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3025 StgRBH *rbh = (StgRBH *)p;
3026 (StgClosure *)rbh->blocking_queue =
3027 evacuate((StgClosure *)rbh->blocking_queue);
3028 if (failed_to_evac) {
3029 failed_to_evac = rtsFalse;
3030 recordMutable((StgMutClosure *)rbh);
3032 // ToDo: use size of reverted closure here!
3033 p += BLACKHOLE_sizeW();
3039 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3040 // follow the pointer to the node which is being demanded
3041 (StgClosure *)bf->node =
3042 evacuate((StgClosure *)bf->node);
3043 // follow the link to the rest of the blocking queue
3044 (StgClosure *)bf->link =
3045 evacuate((StgClosure *)bf->link);
3046 if (failed_to_evac) {
3047 failed_to_evac = rtsFalse;
3048 recordMutable((StgMutClosure *)bf);
3050 p += sizeofW(StgBlockedFetch);
3056 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3059 p += sizeofW(StgFetchMe);
3060 break; // nothing to do in this case
3062 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3064 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3065 (StgClosure *)fmbq->blocking_queue =
3066 evacuate((StgClosure *)fmbq->blocking_queue);
3067 if (failed_to_evac) {
3068 failed_to_evac = rtsFalse;
3069 recordMutable((StgMutClosure *)fmbq);
3071 p += sizeofW(StgFetchMeBlockingQueue);
// shouldn't have anything else on the mutable list
3078 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3085 scavenge_static(void)
3087 StgClosure* p = static_objects;
3088 const StgInfoTable *info;
/* Always evacuate straight to the oldest generation for static
* objects.
*/
3092 evac_gen = oldest_gen->no;
/* keep going until we've scavenged all the objects on the linked
* list.
*/
3096 while (p != END_OF_STATIC_LIST) {
3100 if (info->type==RBH)
3101 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
// make sure the info pointer points into text space
3104 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3105 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3107 /* Take this object *off* the static_objects list,
* and put it on the scavenged_static_objects list.
*/
3110 static_objects = STATIC_LINK(info,p);
3111 STATIC_LINK(info,p) = scavenged_static_objects;
3112 scavenged_static_objects = p;
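/* NB: we move the object onto the scavenged list *before* scavenging
* it, because evacuate() may push newly-live static objects onto
* static_objects, and the IND_STATIC case below may need to take the
* object off the scavenged list again.
*/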
switch (info->type) {
3118 StgInd *ind = (StgInd *)p;
3119 ind->indirectee = evacuate(ind->indirectee);
3121 /* might fail to evacuate it, in which case we have to pop it
3122 * back on the mutable list (and take it off the
3123 * scavenged_static list because the static link and mut link
* pointers are one and the same).
*/
3126 if (failed_to_evac) {
3127 failed_to_evac = rtsFalse;
3128 scavenged_static_objects = IND_STATIC_LINK(p);
3129 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3130 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3144 next = (P_)p->payload + info->layout.payload.ptrs;
3145 // evacuate the pointers
3146 for (q = (P_)p->payload; q < next; q++) {
3147 (StgClosure *)*q = evacuate((StgClosure *)*q);
3153 barf("scavenge_static: strange closure %d", (int)(info->type));
3156 ASSERT(failed_to_evac == rtsFalse);
3158 /* get the next static object from the list. Remember, there might
3159 * be more stuff on this list now that we've done some evacuating!
* (static_objects is a global)
*/
3166 /* -----------------------------------------------------------------------------
3167 scavenge_stack walks over a section of stack and evacuates all the
3168 objects pointed to by it. We can use the same code for walking
3169 PAPs, since these are just sections of copied stack.
3170 -------------------------------------------------------------------------- */
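/* In outline: each stack word is either an argument tag (skip that many
 * words), a word that doesn't look like an info pointer (then it's a
 * closure pointer, which we evacuate), or the info pointer of an
 * activation record (then we walk the record's bitmap to find the
 * pointers in it).
 */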
3173 scavenge_stack(StgPtr p, StgPtr stack_end)
3176 const StgInfoTable* info;
3179 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
/* Each time around this loop, we are looking at a chunk of stack
* that starts with either a pending argument section or an
* activation record.
*/
3187 while (p < stack_end) {
3190 // If we've got a tag, skip over that many words on the stack
3191 if (IS_ARG_TAG((W_)q)) {
/* Is q a pointer to a closure? */
3198 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3200 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3201 ASSERT(closure_STATIC((StgClosure *)q));
3203 // otherwise, must be a pointer into the allocation space.
3206 (StgClosure *)*p = evacuate((StgClosure *)q);
/* Otherwise, q must be the info pointer of an activation
* record.  All activation records have 'bitmap'-style layout
* info.
*/
3216 info = get_itbl((StgClosure *)p);
3218 switch (info->type) {
3220 // Dynamic bitmap: the mask is stored on the stack
3222 bitmap = ((StgRetDyn *)p)->liveness;
3223 p = (P_)&((StgRetDyn *)p)->payload[0];
3226 // probably a slow-entry point return address:
3234 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3235 old_p, p, old_p+1));
3237 p++; // what if FHS!=1 !? -- HWL
3242 /* Specialised code for update frames, since they're so common.
3243 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
* or BLACKHOLE_BQ, so just inline the code to evacuate it here.
*/
3248 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3250 p += sizeofW(StgUpdateFrame);
3253 frame->updatee = evacuate(frame->updatee);
3255 #else // specialised code for update frames, not sure if it's worth it.
3257 nat type = get_itbl(frame->updatee)->type;
3259 if (type == EVACUATED) {
3260 frame->updatee = evacuate(frame->updatee);
3263 bdescr *bd = Bdescr((P_)frame->updatee);
3265 if (bd->gen_no > N) {
3266 if (bd->gen_no < evac_gen) {
3267 failed_to_evac = rtsTrue;
3272 // Don't promote blackholes
3274 if (!(stp->gen_no == 0 &&
3276 stp->no == stp->gen->n_steps-1)) {
3283 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3284 sizeofW(StgHeader), stp);
3285 frame->updatee = to;
3288 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3289 frame->updatee = to;
3290 recordMutable((StgMutClosure *)to);
3293 /* will never be SE_{,CAF_}BLACKHOLE, since we
3294 don't push an update frame for single-entry thunks. KSW 1999-01. */
3295 barf("scavenge_stack: UPDATE_FRAME updatee");
3301 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3308 bitmap = info->layout.bitmap;
3310 // this assumes that the payload starts immediately after the info-ptr
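// NB: a 0 bit in the bitmap denotes a live pointer, which must be
// evacuated; a 1 bit denotes a non-pointer word, which is skipped.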
3312 while (bitmap != 0) {
3313 if ((bitmap & 1) == 0) {
3314 (StgClosure *)*p = evacuate((StgClosure *)*p);
3317 bitmap = bitmap >> 1;
3324 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3329 StgLargeBitmap *large_bitmap;
3332 large_bitmap = info->layout.large_bitmap;
3335 for (i=0; i<large_bitmap->size; i++) {
3336 bitmap = large_bitmap->bitmap[i];
3337 q = p + BITS_IN(W_);
3338 while (bitmap != 0) {
3339 if ((bitmap & 1) == 0) {
3340 (StgClosure *)*p = evacuate((StgClosure *)*p);
3343 bitmap = bitmap >> 1;
3345 if (i+1 < large_bitmap->size) {
3347 (StgClosure *)*p = evacuate((StgClosure *)*p);
3353 // and don't forget to follow the SRT
3358 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3363 /*-----------------------------------------------------------------------------
3364 scavenge the large object list.
3366 evac_gen set by caller; similar games played with evac_gen as with
3367 scavenge() - see comment at the top of scavenge(). Most large
objects are (repeatedly) mutable, so most of the time evac_gen will
be zero.
3370 --------------------------------------------------------------------------- */
3373 scavenge_large(step *stp)
3378 bd = stp->new_large_objects;
3380 for (; bd != NULL; bd = stp->new_large_objects) {
3382 /* take this object *off* the large objects list and put it on
3383 * the scavenged large objects list. This is so that we can
3384 * treat new_large_objects as a stack and push new objects on
* the front when evacuating.
*/
3387 stp->new_large_objects = bd->link;
3388 dbl_link_onto(bd, &stp->scavenged_large_objects);
3390 // update the block count in this step.
3391 stp->n_scavenged_large_blocks += bd->blocks;
3394 if (scavenge_one(p)) {
3395 mkMutCons((StgClosure *)p, stp->gen);
3400 /* -----------------------------------------------------------------------------
3401 Initialising the static object & mutable lists
3402 -------------------------------------------------------------------------- */
3405 zero_static_object_list(StgClosure* first_static)
3409 const StgInfoTable *info;
3411 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3413 link = STATIC_LINK(info, p);
3414 STATIC_LINK(info,p) = NULL;
3418 /* This function is only needed because we share the mutable link
3419 * field with the static link field in an IND_STATIC, so we have to
3420 * zero the mut_link field before doing a major GC, which needs the
3421 * static link field.
* It doesn't do any harm to zero all the mutable link fields on the
* mutable list.
*/
3428 zero_mutable_list( StgMutClosure *first )
3430 StgMutClosure *next, *c;
3432 for (c = first; c != END_MUT_LIST; c = next) {
/* -----------------------------------------------------------------------------
Reverting CAFs
-------------------------------------------------------------------------- */
3447 for (c = (StgIndStatic *)caf_list; c != NULL;
3448 c = (StgIndStatic *)c->static_link)
3450 c->header.info = c->saved_info;
3451 c->saved_info = NULL;
3452 // could, but not necessary: c->static_link = NULL;
3458 scavengeCAFs( void )
3463 for (c = (StgIndStatic *)caf_list; c != NULL;
3464 c = (StgIndStatic *)c->static_link)
3466 c->indirectee = evacuate(c->indirectee);
3470 /* -----------------------------------------------------------------------------
3471 Sanity code for CAF garbage collection.
3473 With DEBUG turned on, we manage a CAF list in addition to the SRT
3474 mechanism. After GC, we run down the CAF list and blackhole any
3475 CAFs which have been garbage collected. This means we get an error
3476 whenever the program tries to enter a garbage collected CAF.
Any garbage collected CAFs are taken off the CAF list at the same
time.
3480 -------------------------------------------------------------------------- */
3482 #if 0 && defined(DEBUG)
3489 const StgInfoTable *info;
3500 ASSERT(info->type == IND_STATIC);
3502 if (STATIC_LINK(info,p) == NULL) {
3503 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3505 SET_INFO(p,&stg_BLACKHOLE_info);
3506 p = STATIC_LINK2(info,p);
3510 pp = &STATIC_LINK2(info,p);
3517 // belch("%d CAFs live", i);
/* -----------------------------------------------------------------------------
Lazy black holing.

Whenever a thread returns to the scheduler after possibly doing
3526 some work, we have to run down the stack and black-hole all the
3527 closures referred to by update frames.
3528 -------------------------------------------------------------------------- */
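/* The point of black-holing here: once a thunk is under evaluation its
 * contents (its free variables) may no longer be reachable any other
 * way, so overwriting it with a black hole lets the GC reclaim them -
 * the classic thunk space-leak fix.
 */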
3531 threadLazyBlackHole(StgTSO *tso)
3533 StgUpdateFrame *update_frame;
3534 StgBlockingQueue *bh;
3537 stack_end = &tso->stack[tso->stack_size];
3538 update_frame = tso->su;
3541 switch (get_itbl(update_frame)->type) {
3544 update_frame = ((StgCatchFrame *)update_frame)->link;
3548 bh = (StgBlockingQueue *)update_frame->updatee;
3550 /* if the thunk is already blackholed, it means we've also
3551 * already blackholed the rest of the thunks on this stack,
3552 * so we can stop early.
3554 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
* don't interfere with this optimisation.
*/
3557 if (bh->header.info == &stg_BLACKHOLE_info) {
3561 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3562 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3563 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3564 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3566 SET_INFO(bh,&stg_BLACKHOLE_info);
3569 update_frame = update_frame->link;
3573 update_frame = ((StgSeqFrame *)update_frame)->link;
3579 barf("threadPaused");
/* -----------------------------------------------------------------------------
* Stack squeezing
*
* Code largely pinched from old RTS, then hacked to bits.  We also do
3589 * lazy black holing here.
3591 * -------------------------------------------------------------------------- */
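/* The squeeze, in outline: two adjacent update frames will eventually
 * be updated with the same value, so we only need one of them.  We keep
 * one frame, turn the other frame's updatee into an indirection to the
 * kept updatee (the UPD_IND_NOLOCK below), drop the redundant frame,
 * and slide the stack words in between over the gap (the `displacement'
 * bookkeeping).
 */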
3594 threadSqueezeStack(StgTSO *tso)
3596 lnat displacement = 0;
3597 StgUpdateFrame *frame;
3598 StgUpdateFrame *next_frame; // Temporally next
3599 StgUpdateFrame *prev_frame; // Temporally previous
3601 rtsBool prev_was_update_frame;
3603 StgUpdateFrame *top_frame;
3604 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3606 void printObj( StgClosure *obj ); // from Printer.c
3608 top_frame = tso->su;
3611 bottom = &(tso->stack[tso->stack_size]);
/* There must be at least one frame, namely the STOP_FRAME. */
3616 ASSERT((P_)frame < bottom);
3618 /* Walk down the stack, reversing the links between frames so that
3619 * we can walk back up as we squeeze from the bottom. Note that
3620 * next_frame and prev_frame refer to next and previous as they were
3621 * added to the stack, rather than the way we see them in this
3622 * walk. (It makes the next loop less confusing.)
3624 * Stop if we find an update frame pointing to a black hole
* (see comment in threadLazyBlackHole()).
*/
// bottom - sizeofW(StgStopFrame) is the STOP_FRAME
3630 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3631 prev_frame = frame->link;
3632 frame->link = next_frame;
3637 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3638 printObj((StgClosure *)prev_frame);
3639 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3642 switch (get_itbl(frame)->type) {
3645 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3658 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3660 printObj((StgClosure *)prev_frame);
3663 if (get_itbl(frame)->type == UPDATE_FRAME
3664 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3669 /* Now, we're at the bottom. Frame points to the lowest update
3670 * frame on the stack, and its link actually points to the frame
* above.  We have to walk back up the stack, squeezing out empty
3672 * update frames and turning the pointers back around on the way
3675 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3676 * we never want to eliminate it anyway. Just walk one step up
3677 * before starting to squeeze. When you get to the topmost frame,
* remember that there are still some words above it that might have
* to be moved.
*/
3685 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
/* Loop through all of the frames (everything except the very
* bottom).  Things are complicated by the fact that we have
* CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
* We can only squeeze when there are two consecutive UPDATE_FRAMEs.
*/
3693 while (frame != NULL) {
3695 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3696 rtsBool is_update_frame;
3698 next_frame = frame->link;
3699 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
/* Check to see if:
*   1. both the previous and current frame are update frames
*   2. the current frame is empty
*/
3705 if (prev_was_update_frame && is_update_frame &&
3706 (P_)prev_frame == frame_bottom + displacement) {
3708 // Now squeeze out the current frame
3709 StgClosure *updatee_keep = prev_frame->updatee;
3710 StgClosure *updatee_bypass = frame->updatee;
3713 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3717 /* Deal with blocking queues. If both updatees have blocked
3718 * threads, then we should merge the queues into the update
3719 * frame that we're keeping.
3721 * Alternatively, we could just wake them up: they'll just go
3722 * straight to sleep on the proper blackhole! This is less code
* and probably less bug prone, although it's probably much
* slower.
*/
3726 #if 0 // do it properly...
3727 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3728 # error Unimplemented lazy BH warning. (KSW 1999-01)
if (GET_INFO(updatee_bypass) == &stg_BLACKHOLE_BQ_info
    || GET_INFO(updatee_bypass) == &stg_CAF_BLACKHOLE_info) {
// Sigh.  It has one.  Don't lose those threads!
if (GET_INFO(updatee_keep) == &stg_BLACKHOLE_BQ_info) {
3735 // Urgh. Two queues. Merge them.
StgTSO *keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3738 while (keep_tso->link != END_TSO_QUEUE) {
3739 keep_tso = keep_tso->link;
3741 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3744 // For simplicity, just swap the BQ for the BH
StgClosure *temp = updatee_keep;
3747 updatee_keep = updatee_bypass;
3748 updatee_bypass = temp;
3750 // Record the swap in the kept frame (below)
3751 prev_frame->updatee = updatee_keep;
3756 TICK_UPD_SQUEEZED();
3757 /* wasn't there something about update squeezing and ticky to be
3758 * sorted out? oh yes: we aren't counting each enter properly
3759 * in this case. See the log somewhere. KSW 1999-04-21
3761 * Check two things: that the two update frames don't point to
3762 * the same object, and that the updatee_bypass isn't already an
3763 * indirection. Both of these cases only happen when we're in a
* black hole-style loop (and there are multiple update frames
3765 * on the stack pointing to the same closure), but they can both
* screw us up if we don't check.
*/
3768 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3769 // this wakes the threads up
3770 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3773 sp = (P_)frame - 1; // sp = stuff to slide
3774 displacement += sizeofW(StgUpdateFrame);
3777 // No squeeze for this frame
3778 sp = frame_bottom - 1; // Keep the current frame
/* Do lazy black-holing. */
3782 if (is_update_frame) {
3783 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3784 if (bh->header.info != &stg_BLACKHOLE_info &&
3785 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3786 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3787 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3788 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3791 /* zero out the slop so that the sanity checker can tell
* where the next closure is.
*/
3795 StgInfoTable *info = get_itbl(bh);
3796 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3797 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3798 * info is used for a different purpose, and it's exactly the
* same size as a BLACKHOLE in any case.
*/
3801 if (info->type != THUNK_SELECTOR) {
3802 for (i = np; i < np + nw; i++) {
3803 ((StgClosure *)bh)->payload[i] = 0;
3808 SET_INFO(bh,&stg_BLACKHOLE_info);
3812 // Fix the link in the current frame (should point to the frame below)
3813 frame->link = prev_frame;
3814 prev_was_update_frame = is_update_frame;
3817 // Now slide all words from sp up to the next frame
3819 if (displacement > 0) {
3820 P_ next_frame_bottom;
3822 if (next_frame != NULL)
3823 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3825 next_frame_bottom = tso->sp - 1;
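/* The copy below runs from higher addresses to lower ones while each
* word moves `displacement' words towards the stack bottom, so a
* source word is never overwritten before it has been read: the slide
* is safe to do in place.
*/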
3829 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3833 while (sp >= next_frame_bottom) {
3834 sp[displacement] = *sp;
prev_frame = (StgUpdateFrame *)((P_)frame + displacement);
3842 tso->sp += displacement;
3843 tso->su = prev_frame;
3846 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3847 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
/* -----------------------------------------------------------------------------
* Pausing a thread
*
* We have to prepare for GC - this means doing lazy black holing
* here.  We also take the opportunity to do stack squeezing if it's
* turned on.
3858 * -------------------------------------------------------------------------- */
3860 threadPaused(StgTSO *tso)
3862 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3863 threadSqueezeStack(tso); // does black holing too
3865 threadLazyBlackHole(tso);
/* -----------------------------------------------------------------------------
* Debugging
* -------------------------------------------------------------------------- */
3874 printMutOnceList(generation *gen)
3876 StgMutClosure *p, *next;
3878 p = gen->mut_once_list;
3881 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3882 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3883 fprintf(stderr, "%p (%s), ",
3884 p, info_type((StgClosure *)p));
3886 fputc('\n', stderr);
3890 printMutableList(generation *gen)
3892 StgMutClosure *p, *next;
3897 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3898 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3899 fprintf(stderr, "%p (%s), ",
3900 p, info_type((StgClosure *)p));
3902 fputc('\n', stderr);
3905 static inline rtsBool
3906 maybeLarge(StgClosure *closure)
3908 StgInfoTable *info = get_itbl(closure);
3910 /* closure types that may be found on the new_large_objects list;
3911 see scavenge_large */
3912 return (info->type == MUT_ARR_PTRS ||
3913 info->type == MUT_ARR_PTRS_FROZEN ||
3914 info->type == TSO ||
3915 info->type == ARR_WORDS);