/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.128 2001/11/26 16:54:21 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "Sanity.h"
#include "BlockAlloc.h"
#include "MBlock.h"
#include "Weak.h"
#include "StablePriv.h"
#include "Prelude.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "FetchMe.h"
# if defined(DEBUG)
#  include "Printer.h"
#  include "ParallelDebug.h"
# endif
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

#include "RetainerProfile.h"
#include "LdvProfile.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
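/* For example, this is how evacuate() conses a live static thunk onto
 * the list (a sketch of the pattern used in the THUNK_STATIC and
 * CONSTR_STATIC cases below; the link field doubles as the "am I
 * already on the list?" mark):
 *
 *   if (THUNK_STATIC_LINK(q) == NULL) {   // not on the list yet
 *       THUNK_STATIC_LINK(q) = static_objects;
 *       static_objects = q;
 *   }
 */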
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list
static rtsBool weak_done;   // all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
lnat new_blocks;	  // blocks allocated during this GC
lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
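/* (For instance, a deep chain of selector thunks - think of something
 * like fst (fst (fst ...)) built up lazily - would otherwise make the
 * THUNK_SELECTOR case of evacuate() recurse once per link; the depth
 * counter caps that at MAX_THUNK_SELECTOR_DEPTH.  The fst example is
 * illustrative, not taken from this file.)
 */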
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
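/* The stack occupies MARK_STACK_BLOCKS contiguous blocks, giving room
 * for MARK_STACK_BLOCKS * BLOCK_SIZE_W entries; when it fills up we set
 * mark_stack_overflowed and fall back to the linear scan mentioned
 * above (see scavenge_mark_stack()).
 */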
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED; // it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgWord *bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
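	  // (i.e. one mark bit per word in the step: BLOCK_SIZE bytes
	  // hold BLOCK_SIZE/sizeof(W_) words, and each byte of bitmap
	  // covers BITS_PER_BYTE of them)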
	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0; // *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);
#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;
  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;
    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	  scavenge_mark_stack();
	  flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }

    if (flag) { goto loop; }

    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV)
      LdvCensusForDead(N);
#endif

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }
    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED; // now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED; // now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
	  (RtsFlags.GcFlags.compact ||
	   (max > 0 &&
	    oldest_gen->steps[0].n_blocks >
	    (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

	  // this test is necessary to ensure that the calculations
	  // below don't have any negative results - we're working
	  // with unsigned values here.
	  if (max < min_alloc) {
	      heapOverflow();
	  }

	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }

	  if (size < live) {
	      heapOverflow();
	  }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0; // now from-space
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks; // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	  (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	  (100 + (long)g0s0_pcnt_kept);
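      /* (A worked example, with made-up numbers: suggested = 1000
       * blocks, needed = 200 and g0s0_pcnt_kept = 30 give
       * blocks = (1000 - 200) * 100 / 130, i.e. roughly 615 blocks
       * for the allocation area.)
       */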
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);

    } else {
      // we might have added extra large blocks to the nursery, so
      // resize back to minAllocAreaSize again.
      resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
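/* (To see why the iteration is needed: suppose weak pointer w1's key
 * is only reachable from the value of weak pointer w2.  w1's key can
 * only be seen to be alive once w2's value has been evacuated, so we
 * keep re-running traverse_weak_ptr_list until a pass makes no
 * progress.  The two-weak-pointer scenario is an illustrative example,
 * not a case taken from this file.)
 */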
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default:
	  ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads  = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
	if (bd->flags & BF_EVACUATED) {
	    return p;
	} else {
	    return NULL;
	}
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:	    // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }
      // fall through

    default:
      // dead.
      return NULL;
    }
  }
}

static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->to_blocks = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}


static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies
  // size_to_reserve words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	 (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
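/* (This is the mechanism behind the failed_to_evac handling in
 * scavenge(): for instance, if an immutable constructor in generation
 * 2 ends up pointing at an object we could only evacuate to generation
 * 1, we wrap the constructor in a MUT_CONS on generation 2's mutable
 * list so the pointer gets another chance to be promoted at the next
 * GC.  The specific generation numbers here are illustrative only.)
 */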
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    // not a group head: find the group head
    if (bd->blocks == 0) { bd = bd->link; }

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	// nope
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);
  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
    return copy(q,sizeW_fromITBL(info),stp);

  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection!
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
	  // otherwise, fall through...
#endif

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

# ifdef DIST
      case REMOTE_REF:
# endif
      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) { // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      return dest;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
  }

  return dest;
}
/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the
       (indirect) reference to the DLL closure in the table by first adding
       one to it.  We check for this here, and undo the addition before
       evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
    }

    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
    case FUN_0_2:
	scavenge_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
    case FUN_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case IND_PERM:
	if (stp->gen->no != 0) {
#ifdef PROFILING
	    // @LDV profiling
	    // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
	    // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
	    LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
	    //
	    // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
	    //
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
	    // @LDV profiling
	    // We pretend that p has just been created.
	    LDV_recordCreate((StgClosure *)p);
#endif
	}
	// fall through
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;
    case BLACKHOLE_BQ:
    {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	    evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
    }

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
    }
    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0; // repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
    }
    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }

#ifdef DIST
    case REMOTE_REF:
#endif
    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
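/* (The entries on the mark stack are whole closures, pushed by
 * evacuate() when it encounters an unmarked object in a step that is
 * being compacted - see push_mark_stack() and the is_compacted branch
 * of evacuate() above.)
 */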
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {
2446 switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse; // mutable.
	    break;
	}
	case FUN_2_0:
	case THUNK_2_0:
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0: case FUN_1_1:
	case THUNK_1_0: case THUNK_1_1:
	case CONSTR_1_0: case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;
	case FUN:
	case THUNK:
	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;
	    break;
	}
	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}
	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    break;
	}

	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
#if defined(PAR)
	case RBH: // cf. BLACKHOLE_BQ
	{
#if 0
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    recordMutable((StgMutClosure *)rbh);
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    IF_DEBUG(gc,
		     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			   p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
			   bf, info_type((StgClosure *)bf),
			   bf->node, info_type(bf->node)));
	    break;
	}

	case FETCH_ME:
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s) exciting, isn't it",
			   p, info_type((StgClosure *)p)));
	    break;
	}
#endif // PAR

	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);
	}

	// mark the next bit to indicate "scavenged"
	mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
	IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
	mark_stack_overflowed = rtsFalse;
	oldgen_scan_bd = oldest_gen->steps[0].blocks;
	oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
	// push a new thing on the mark stack
    loop:
	// find a closure that is marked but not scavenged, and start
	// from there.
	while (oldgen_scan < oldgen_scan_bd->free
	       && !is_marked(oldgen_scan,oldgen_scan_bd)) {
	    oldgen_scan++;
	}

	if (oldgen_scan < oldgen_scan_bd->free) {

	    // already scavenged?
	    if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
		oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
		goto loop;
	    }
	    push_mark_stack(oldgen_scan);
	    // ToDo: bump the linear scan by the actual size of the object
	    oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
	    goto linear_scan;
	}

	oldgen_scan_bd = oldgen_scan_bd->link;
	if (oldgen_scan_bd != NULL) {
	    oldgen_scan = oldgen_scan_bd->start;
	    goto loop;
	}
    }
}
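/* The mark-stack operations used above, in miniature.  This is an
 * illustrative sketch only, not the RTS definitions (the real ones
 * belong to the compacting collector): a simple LIFO of StgPtrs where
 * overflow is survivable, because the linear-scan code above re-finds
 * anything we were forced to drop.
 */
#if 0
static StgPtr *mark_stack_base;   // bottom of the stack
static StgPtr *mark_sp;           // next free slot
static StgPtr *mark_stack_limit;  // one past the last slot

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack_base;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}

static inline void
push_mark_stack(StgPtr p)
{
    if (mark_sp == mark_stack_limit) {
	// stack full: drop p and let the linear scan recover it later
	mark_stack_overflowed = rtsTrue;
	return;
    }
    *mark_sp++ = p;
}
#endif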
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case FUN:
    case FUN_1_0:			// hardly worth specialising these guys
    case FUN_0_1: case FUN_1_1: case FUN_0_2: case FUN_2_0:
    case THUNK: case THUNK_1_0: case THUNK_0_1:
    case THUNK_1_1: case THUNK_0_2: case THUNK_2_0:
    case CONSTR: case CONSTR_1_0: case CONSTR_0_1:
    case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0:
    case WEAK:
    case FOREIGN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    {
	StgPtr q, end;

	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
    }

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	break;
    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }

    case ARR_WORDS:
	// nothing to follow
	break;
    case MUT_ARR_PTRS:
    {
	// follow everything
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	recordMutable((StgMutClosure *)p);
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }
    case MUT_ARR_PTRS_FROZEN:
    {
	// follow everything
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;		// repeatedly mutable
	scavengeTSO(tso);
	recordMutable((StgMutClosure *)tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }

    case AP_UPD:
    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	break;
    }
    case IND_OLDGEN:
	// This might happen if for instance a MUT_CONS was pointing to a
	// THUNK which has since been updated.  The IND_OLDGEN will
	// be on the mutable list anyway, so we don't need to do anything
	// here.
	break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
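/* Illustrative usage note (not from the original source): callers read a
 * non-zero result from scavenge_one() as "this object still holds
 * old-to-new pointers" and must keep it visible to the next GC, e.g.
 *
 *     if (scavenge_one(p)) {
 *         mkMutCons((StgClosure *)p, stp->gen);  // keep p on a mutable list
 *     }
 *
 * which is exactly the pattern used by scavenge_mut_once_list() and
 * scavenge_large() below.
 */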
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
	switch(info->type) {

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	     * promoted
	     */
	    {
		StgPtr start = gen->steps[0].scan;
		bdescr *start_bd = gen->steps[0].scan_bd;
		nat size = 0;
		scavenge(&gen->steps[0]);
		if (start_bd != gen->steps[0].scan_bd) {
		    size += (P_)BLOCK_ROUND_UP(start) - start;
		    start_bd = start_bd->link;
		    while (start_bd != gen->steps[0].scan_bd) {
			size += BLOCK_SIZE_W;
			start_bd = start_bd->link;
		    }
		    size += gen->steps[0].scan -
			(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		} else {
		    size = gen->steps[0].scan - start;
		}
		belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
	    }
#endif
	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	     * in generation 1.
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    } else {
		/* the mut_link field of an IND_STATIC is overloaded as the
		 * static link field too (it just so happens that we don't need
		 * both at the same time), so we need to NULL it out when
		 * removing this object from the mutable list because the static
		 * link fields are all assumed to be NULL before doing a major
		 * collection.
		 */
		p->mut_link = NULL;
	    }
	    continue;
	case MUT_CONS:
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	     * points to.
	     */
	    if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.
		 */
		p->mut_link = new_list;
		new_list = p;
	    }
	    continue;

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
	}
    }

    gen->mut_once_list = new_list;
}
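/* The list-filtering idiom above, in isolation (an illustrative sketch,
 * not RTS code): survivors are consed onto new_list, everything else
 * simply drops off, and the generation's list is replaced wholesale at
 * the end -- no unlinking is needed because the old list is abandoned:
 *
 *     new_list = END_MUT_LIST;
 *     for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
 *         if (must_stay_mutable(p)) {      // hypothetical predicate
 *             p->mut_link = new_list;
 *             new_list = p;
 *         }
 *     }
 *     gen->mut_once_list = new_list;
 */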
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
	switch(info->type) {

	case MUT_ARR_PTRS:
	    // follow everything
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	{
	    StgPtr end, q;

	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    continue;
	}
	// Happens if a MUT_ARR_PTRS in the old generation is frozen
	case MUT_ARR_PTRS_FROZEN:
	{
	    StgPtr end, q;

	    evac_gen = gen->no;
	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    evac_gen = 0;
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		mkMutCons((StgClosure *)p, gen);
	    }
	    continue;
	}
	case MUT_VAR:
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;

	case MVAR:
	{
	    StgMVar *mvar = (StgMVar *)p;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    scavengeTSO(tso);

	    /* Don't take this TSO off the mutable list - it might still
	     * point to some younger objects (because we set evac_gen to 0
	     * above).
	     */
	    tso->mut_link = gen->mut_list;
	    gen->mut_list = (StgMutClosure *)tso;
	    continue;
	}
	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
	/* Happens if a BLACKHOLE_BQ in the old generation is updated:
	 */
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = gen->mut_once_list;
		gen->mut_once_list = p;
	    } else {
		p->mut_link = NULL;
	    }
	    continue;
#if defined(PAR)
	// HWL: check whether all of these are necessary

	case RBH: // cf. BLACKHOLE_BQ
	{
	    // nat size, ptrs, nonptrs, vhs;
	    // char str[80];
	    // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)rbh);
	    }
	    // ToDo: use size of reverted closure here!
	    p += BLACKHOLE_sizeW();
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    p += sizeofW(StgBlockedFetch);
	    break;
	}

	case REMOTE_REF:
	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

	case FETCH_ME:
	    p += sizeofW(StgFetchMe);
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    p += sizeofW(StgFetchMeBlockingQueue);
	    break;
	}
#endif // PAR

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
	}
    }
}
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
       list... */
    while (p != END_OF_STATIC_LIST) {

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	 */
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;

	switch (info -> type) {

	case IND_STATIC:
	{
	    StgInd *ind = (StgInd *)p;
	    ind->indirectee = evacuate(ind->indirectee);

	    /* might fail to evacuate it, in which case we have to pop it
	     * back on the mutable list (and take it off the
	     * scavenged_static list because the static link and mut link
	     * pointers are one and the same).
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		scavenged_static_objects = IND_STATIC_LINK(p);
		((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		oldest_gen->mut_once_list = (StgMutClosure *)ind;
	    }
	    break;
	}
	case THUNK_STATIC:
	case FUN_STATIC:
	    scavenge_srt(info);
	    break;

	case CONSTR_STATIC:
	{
	    StgPtr q, next;

	    next = (P_)p->payload + info->layout.payload.ptrs;
	    // evacuate the pointers
	    for (q = (P_)p->payload; q < next; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    break;
	}

	default:
	    barf("scavenge_static: strange closure %d", (int)(info->type));
	}
	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)
	 */
	p = static_objects;
    }
}
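/* The "scan while adding" property above, in miniature (an illustrative
 * sketch, not RTS code): evacuate() may cons more objects onto the
 * global static_objects list, so the loop re-reads the list head each
 * time round instead of caching a cursor.  The same shape with a toy
 * worklist:
 */
#if 0
typedef struct Work_ { struct Work_ *link; } Work;
static Work *todo;     // global worklist: process() may push onto it

static void
drain( void (*process)(Work *) )
{
    Work *w;
    while (todo != NULL) {
	w = todo;
	todo = w->link;    // unlink *before* processing,
	process(w);        // because process() may push new work
    }
}
#endif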
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */

    while (p < stack_end) {
	q = *(P_ *)p;

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {
	    p += ARG_SIZE(q);
	    p++; continue;
	}

	/* Is q a pointer to a closure?
	 */
	if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  // Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    }
	    // otherwise, must be a pointer into the allocation space.
#endif
	    (StgClosure *)*p = evacuate((StgClosure *)q);
	    p++;
	    continue;
	}
	/*
	 * Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	 * info.
	 */
	info  = get_itbl((StgClosure *)p);

	switch (info->type) {

	    // Dynamic bitmap: the mask is stored on the stack
	case RET_DYN:
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    goto small_bitmap;

	    // probably a slow-entry point return address:
	case FUN:
	case FUN_STATIC:
	{
#if 0
	    StgPtr old_p = p;
	    p++; p++;
	    IF_DEBUG(sanity,
		     belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
			   old_p, p, old_p+1));
#else
	    p++; // what if FHS!=1 !? -- HWL
#endif
	    goto follow_srt;
	}
	    /* Specialised code for update frames, since they're so common.
	     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	     */
	case UPDATE_FRAME:
	{
	    StgUpdateFrame *frame = (StgUpdateFrame *)p;

	    p += sizeofW(StgUpdateFrame);

#ifndef not_yet
	    frame->updatee = evacuate(frame->updatee);
	    continue;
#else // specialised code for update frames, not sure if it's worth it.
	    StgClosure *to;
	    nat type = get_itbl(frame->updatee)->type;

	    if (type == EVACUATED) {
		frame->updatee = evacuate(frame->updatee);
		continue;
	    } else {
		bdescr *bd = Bdescr((P_)frame->updatee);
		step *stp;
		if (bd->gen_no > N) {
		    if (bd->gen_no < evac_gen) {
			failed_to_evac = rtsTrue;
		    }
		    continue;
		}

		// Don't promote blackholes
		stp = bd->step;
		if (!(stp->gen_no == 0 &&
		      stp->no != 0 &&
		      stp->no == stp->gen->n_steps-1)) {
		    stp = stp->to;
		}

		switch (type) {
		case BLACKHOLE:
		case CAF_BLACKHOLE:
		    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				  sizeofW(StgHeader), stp);
		    frame->updatee = to;
		    continue;
		case BLACKHOLE_BQ:
		    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
		    frame->updatee = to;
		    recordMutable((StgMutClosure *)to);
		    continue;
		default:
		    /* will never be SE_{,CAF_}BLACKHOLE, since we
		       don't push an update frame for single-entry thunks.  KSW 1999-01. */
		    barf("scavenge_stack: UPDATE_FRAME updatee");
		}
	    }
#endif
	}
	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case STOP_FRAME:
	case CATCH_FRAME:
	case SEQ_FRAME:
	case RET_BCO:
	case RET_SMALL:
	case RET_VEC_SMALL:
	    bitmap = info->layout.bitmap;
	    p++;
	    // this assumes that the payload starts immediately after the info-ptr
	small_bitmap:
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		p++;
		bitmap = bitmap >> 1;
	    }

	follow_srt:
	    scavenge_srt(info);
	    continue;
	    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	case RET_BIG:
	case RET_VEC_BIG:
	{
	    StgLargeBitmap *large_bitmap;
	    nat i;

	    large_bitmap = info->layout.large_bitmap;
	    p++;

	    for (i=0; i<large_bitmap->size; i++) {
		bitmap = large_bitmap->bitmap[i];
		q = p + BITS_IN(W_);
		while (bitmap != 0) {
		    if ((bitmap & 1) == 0) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
		    }
		    p++;
		    bitmap = bitmap >> 1;
		}
		if (i+1 < large_bitmap->size) {
		    while (p < q) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
			p++;
		    }
		}
	    }

	    // and don't forget to follow the SRT
	    goto follow_srt;
	}

	default:
	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
	}
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p;

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	 */
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	// update the block count in this step.
	stp->n_scavenged_large_blocks += bd->blocks;

	p = bd->start;
	if (scavenge_one(p)) {
	    mkMutCons((StgClosure *)p, stp->gen);
	}
    }
}
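/* dbl_link_onto() is defined elsewhere in the storage manager; this is
 * a sketch of the shape it must have (illustrative, not the actual
 * definition), assuming bdescr's link/back fields form a doubly-linked
 * list:
 */
#if 0
static void
dbl_link_onto( bdescr *bd, bdescr **list )
{
    bd->link = *list;           // new head points at the old head
    bd->back = NULL;            // nothing before the head
    if (*list) {
	(*list)->back = bd;     // old head's back pointer now points at bd
    }
    *list = bd;
}
#endif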
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	info = get_itbl(p);
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;
    }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */

static void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;
	c->mut_link = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
    StgClosure*  p;
    StgClosure** pp;
    const StgInfoTable *info;
    nat i;

    i = 0;
    p = caf_list;
    pp = &caf_list;

    while (p != NULL) {

	info = get_itbl(p);

	ASSERT(info->type == IND_STATIC);

	if (STATIC_LINK(info,p) == NULL) {
	    IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
	    // black hole it
	    SET_INFO(p,&stg_BLACKHOLE_info);
	    p = STATIC_LINK2(info,p);
	    *pp = p;
	}
	else {
	    pp = &STATIC_LINK2(info,p);
	    p = *pp;
	    i++;
	}
    }

    //  belch("%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
static void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
	switch (get_itbl(update_frame)->type) {

	case CATCH_FRAME:
	    update_frame = ((StgCatchFrame *)update_frame)->link;
	    break;

	case UPDATE_FRAME:
	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	     */
	    if (bh->header.info == &stg_BLACKHOLE_info) {
		return;
	    }

	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef PROFILING
		// We pretend that bh is now dead.
		LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
#ifdef PROFILING
		// We pretend that bh has just been created.
		LDV_recordCreate(bh);
#endif
	    }

	    update_frame = update_frame->link;
	    break;

	case SEQ_FRAME:
	    update_frame = ((StgSeqFrame *)update_frame)->link;
	    break;

	case STOP_FRAME:
	    return;

	default:
	    barf("threadPaused");
	}
    }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
static void
threadSqueezeStack(StgTSO *tso)
{
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame;                 // Temporally next
    StgUpdateFrame *prev_frame;                 // Temporally previous
    StgPtr bottom;
    rtsBool prev_was_update_frame;
    StgUpdateFrame *top_frame;
#if DEBUG
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
	bhs=0, squeezes=0;
    void printObj( StgClosure *obj ); // from Printer.c
#endif

    top_frame  = tso->su;

    bottom = &(tso->stack[tso->stack_size]);
    frame  = top_frame;

    /* There must be at least one frame, namely the STOP_FRAME.
     */
    ASSERT((P_)frame < bottom);
    /* Walk down the stack, reversing the links between frames so that
     * we can walk back up as we squeeze from the bottom.  Note that
     * next_frame and prev_frame refer to next and previous as they were
     * added to the stack, rather than the way we see them in this
     * walk. (It makes the next loop less confusing.)
     *
     * Stop if we find an update frame pointing to a black hole
     * (see comment in threadLazyBlackHole()).
     */

    next_frame = NULL;

    // bottom - sizeof(StgStopFrame) is the STOP_FRAME
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
	prev_frame = frame->link;
	frame->link = next_frame;
	next_frame = frame;
	frame = prev_frame;
#if DEBUG
	IF_DEBUG(sanity,
		 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
		     printObj((StgClosure *)prev_frame);
		     barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
			  frame, prev_frame);
		 })
	switch (get_itbl(frame)->type) {
	case UPDATE_FRAME:
	    upd_frames++;
	    if (frame->updatee->header.info == &stg_BLACKHOLE_info)
		bhs++;
	    break;
	case STOP_FRAME:  stop_frames++;  break;
	case CATCH_FRAME: catch_frames++; break;
	case SEQ_FRAME:   seq_frames++;   break;
	default:
	    barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
		 frame, prev_frame);
	    printObj((StgClosure *)prev_frame);
	}
#endif
	if (get_itbl(frame)->type == UPDATE_FRAME
	    && frame->updatee->header.info == &stg_BLACKHOLE_info) {
	    break;
	}
    }
    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above. We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     * back up.
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze. When you get to the topmost frame,
     * remember that there are still some words above it that might have
     * to be moved.
     */

    prev_frame = frame;
    frame = next_frame;

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
     */
    while (frame != NULL) {
	StgPtr sp;
	StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
	rtsBool is_update_frame;

	next_frame = frame->link;
	is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

	/* Check to see if
	 *   1. both the previous and current frame are update frames
	 *   2. the current frame is empty
	 */
	if (prev_was_update_frame && is_update_frame &&
	    (P_)prev_frame == frame_bottom + displacement) {
	    // Now squeeze out the current frame
	    StgClosure *updatee_keep   = prev_frame->updatee;
	    StgClosure *updatee_bypass = frame->updatee;

	    IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
	    IF_DEBUG(sanity, squeezes++);

	    /* Deal with blocking queues.  If both updatees have blocked
	     * threads, then we should merge the queues into the update
	     * frame that we're keeping.
	     *
	     * Alternatively, we could just wake them up: they'll just go
	     * straight to sleep on the proper blackhole!  This is less code
	     * and probably less bug prone, although it's probably much
	     * slower --SDM
	     */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
	    if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
		|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
		) {
		// Sigh.  It has one.  Don't lose those threads!
		if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
		    // Urgh.  Two queues.  Merge them.
		    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

		    while (keep_tso->link != END_TSO_QUEUE) {
			keep_tso = keep_tso->link;
		    }
		    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

		} else {
		    // For simplicity, just swap the BQ for the BH
		    P_ temp = updatee_keep;

		    updatee_keep = updatee_bypass;
		    updatee_bypass = temp;

		    // Record the swap in the kept frame (below)
		    prev_frame->updatee = updatee_keep;
		}
	    }
#endif
	    TICK_UPD_SQUEEZED();
	    /* wasn't there something about update squeezing and ticky to be
	     * sorted out?  oh yes: we aren't counting each enter properly
	     * in this case.  See the log somewhere.  KSW 1999-04-21
	     *
	     * Check two things: that the two update frames don't point to
	     * the same object, and that the updatee_bypass isn't already an
	     * indirection.  Both of these cases only happen when we're in a
	     * black hole-style loop (and there are multiple update frames
	     * on the stack pointing to the same closure), but they can both
	     * screw us up if we don't check.
	     */
	    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
		// this wakes the threads up
		UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
	    }

	    sp = (P_)frame - 1;         // sp = stuff to slide
	    displacement += sizeofW(StgUpdateFrame);

	} else {
	    // No squeeze for this frame
	    sp = frame_bottom - 1;      // Keep the current frame
	    /* Do lazy black-holing.
	     */
	    if (is_update_frame) {
		StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
		if (bh->header.info != &stg_BLACKHOLE_info &&
		    bh->header.info != &stg_BLACKHOLE_BQ_info &&
		    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		    belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef DEBUG
		    /* zero out the slop so that the sanity checker can tell
		     * where the next closure is.
		     */
		    {
			StgInfoTable *info = get_itbl(bh);
			nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
			/* don't zero out slop for a THUNK_SELECTOR, because its layout
			 * info is used for a different purpose, and it's exactly the
			 * same size as a BLACKHOLE in any case.
			 */
			if (info->type != THUNK_SELECTOR) {
			    for (i = np; i < np + nw; i++) {
				((StgClosure *)bh)->payload[i] = 0;
			    }
			}
		    }
#endif
#ifdef PROFILING
		    // We pretend that bh is now dead.
		    LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
		    // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
		    SET_INFO(bh,&stg_BLACKHOLE_info);
#ifdef PROFILING
		    // We pretend that bh has just been created.
		    LDV_recordCreate(bh);
#endif
		}
	    }
	    // Fix the link in the current frame (should point to the frame below)
	    frame->link = prev_frame;
	    prev_was_update_frame = is_update_frame;
	}

	// Now slide all words from sp up to the next frame

	if (displacement > 0) {
	    P_ next_frame_bottom;

	    if (next_frame != NULL)
		next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
	    else
		next_frame_bottom = tso->sp - 1;

#if 0
	    IF_DEBUG(gc,
		     belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
			   displacement))
#endif

	    while (sp >= next_frame_bottom) {
		sp[displacement] = *sp;
		sp -= 1;
	    }
	}
	(P_)prev_frame = (P_)frame + displacement;
	frame = next_frame;
    }
    tso->sp += displacement;
    tso->su = prev_frame;

    IF_DEBUG(gc,
	     belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
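/* A squeeze in miniature (an illustrative diagram, not from the
 * original source).  Two adjacent update frames, with no live stack
 * words between them, updating thunks t_keep (pushed earlier) and
 * t_bypass (pushed later):
 *
 *     before                          after
 *     ------                          -----
 *     ... words to slide ...          ... words to slide ...  (slid by one
 *     update frame: t_bypass   ==>    update frame: t_keep     frame's worth)
 *     update frame: t_keep            t_bypass = IND -> t_keep
 *
 * The bypassed updatee is overwritten with an indirection to the kept
 * one (UPD_IND_NOLOCK above), so entering either thunk now reaches the
 * same value; the redundant frame's words are reclaimed by sliding the
 * stack by sizeofW(StgUpdateFrame), and tso->sp/tso->su are fixed up at
 * the end.
 */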
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso);        // does black holing too
    else
	threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}

void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);
}