1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.127 2001/11/22 14:25:12 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
10 #include "PosixSource.h"
15 #include "StoragePriv.h"
18 #include "SchedAPI.h" // for RevertCAFs prototype
20 #include "BlockAlloc.h"
26 #include "StablePriv.h"
28 #include "ParTicky.h" // ToDo: move into Rts.h
29 #include "GCCompact.h"
30 #if defined(GRAN) || defined(PAR)
31 # include "GranSimRts.h"
32 # include "ParallelRts.h"
36 # include "ParallelDebug.h"
41 #if defined(RTS_GTK_FRONTPANEL)
42 #include "FrontPanel.h"
45 #include "RetainerProfile.h"
46 #include "LdvProfile.h"
48 /* STATIC OBJECT LIST.
51 * We maintain a linked list of static objects that are still live.
52 * The requirements for this list are:
54 * - we need to scan the list while adding to it, in order to
55 * scavenge all the static objects (in the same way that
56 * breadth-first scavenging works for dynamic objects).
58 * - we need to be able to tell whether an object is already on
59 * the list, to break loops.
61 * Each static object has a "static link field", which we use for
62 * linking objects on to the list. We use a stack-type list, consing
63 * objects on the front as they are added (this means that the
64 scavenge phase is depth-first, not breadth-first, but that shouldn't matter).
67 * A separate list is kept for objects that have been scavenged
68 * already - this is so that we can zero all the marks afterwards.
70 * An object is on the list if its static link field is non-zero; this
71 * means that we have to mark the end of the list with '1', not NULL.
73 * Extra notes for generational GC:
75 * Each generation has a static object list associated with it. When
76 * collecting generations up to N, we treat the static object lists
77 * from generations > N as roots.
79 * We build up a static object list while collecting generations 0..N,
80 * which is then appended to the static object list of generation N+1.
82 StgClosure* static_objects; // live static objects
83 StgClosure* scavenged_static_objects; // static objects scavenged so far
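
/* A hedged, self-contained sketch (not RTS code) of the intrusive list
 * scheme described above: the static link field doubles as the
 * membership test, so the end of the list is marked with a non-zero
 * sentinel rather than NULL.  All names here are hypothetical.
 */
#if 0 /* illustrative sketch, never compiled */
typedef struct Obj_ { struct Obj_ *static_link; } Obj;

#define END_OF_LIST ((Obj *)1)          // non-NULL: "on the list, last"
static Obj *obj_list = END_OF_LIST;

static void push_if_new(Obj *o)
{
    if (o->static_link == NULL) {       // NULL link means "not on the list"
        o->static_link = obj_list;      // cons onto the front
        obj_list = o;
    }
}
#endif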
85 /* N is the oldest generation being collected, where the generations
86 * are numbered starting at 0. A major GC (indicated by the major_gc
87 * flag) is when we're collecting all generations. We only attempt to
88 * deal with static objects and GC CAFs when doing a major GC.
91 static rtsBool major_gc;
93 /* Youngest generation that objects should be evacuated to in
94 * evacuate(). (Logically an argument to evacuate, but it's static
95 * a lot of the time so we optimise it into a global variable).
101 StgWeak *old_weak_ptr_list; // also pending finaliser list
102 static rtsBool weak_done; // all done for this pass
104 /* List of all threads during GC
106 static StgTSO *old_all_threads;
107 static StgTSO *resurrected_threads;
109 /* Flag indicating failure to evacuate an object to the desired generation.
112 static rtsBool failed_to_evac;
114 /* Old to-space (used for two-space collector only)
116 bdescr *old_to_blocks;
118 /* Data used for allocation area sizing.
120 lnat new_blocks; // blocks allocated during this GC
121 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
123 /* Used to avoid long recursion due to selector thunks
125 lnat thunk_selector_depth = 0;
126 #define MAX_THUNK_SELECTOR_DEPTH 256
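
/* A hedged sketch of the depth-bounding pattern this limit supports
 * (cf. the THUNK_SELECTOR case in evacuate() below); past the limit we
 * simply stop chasing rather than risk blowing the C stack:
 */
#if 0 /* illustrative sketch, never compiled */
static StgClosure *chase_selectee(StgClosure *selectee)
{
    if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
        thunk_selector_depth++;
        selectee = evacuate(selectee);  // may recurse through more selectors
        thunk_selector_depth--;
    }
    // past the limit: leave it alone; it is evacuated as a plain thunk
    return selectee;
}
#endif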
128 /* -----------------------------------------------------------------------------
129 Static function declarations
130 -------------------------------------------------------------------------- */
132 static void mark_root ( StgClosure **root );
133 static StgClosure * evacuate ( StgClosure *q );
134 static void zero_static_object_list ( StgClosure* first_static );
135 static void zero_mutable_list ( StgMutClosure *first );
137 static rtsBool traverse_weak_ptr_list ( void );
138 static void mark_weak_ptr_list ( StgWeak **list );
140 static void scavenge ( step * );
141 static void scavenge_mark_stack ( void );
142 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
143 static rtsBool scavenge_one ( StgPtr p );
144 static void scavenge_large ( step * );
145 static void scavenge_static ( void );
146 static void scavenge_mutable_list ( generation *g );
147 static void scavenge_mut_once_list ( generation *g );
149 #if 0 && defined(DEBUG)
150 static void gcCAFs ( void );
153 /* -----------------------------------------------------------------------------
154 inline functions etc. for dealing with the mark bitmap & stack.
155 -------------------------------------------------------------------------- */
157 #define MARK_STACK_BLOCKS 4
159 static bdescr *mark_stack_bdescr;
160 static StgPtr *mark_stack;
161 static StgPtr *mark_sp;
162 static StgPtr *mark_splim;
164 // Flag and pointers used for falling back to a linear scan when the
165 // mark stack overflows.
166 static rtsBool mark_stack_overflowed;
167 static bdescr *oldgen_scan_bd;
168 static StgPtr oldgen_scan;
170 static inline rtsBool
171 mark_stack_empty(void)
173 return mark_sp == mark_stack;
176 static inline rtsBool
177 mark_stack_full(void)
179 return mark_sp >= mark_splim;
183 reset_mark_stack(void)
185 mark_sp = mark_stack;
189 push_mark_stack(StgPtr p)
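
/* A minimal, self-contained sketch (not the RTS code) of the bounded
 * mark stack described by the variables above: a push past the limit
 * just raises the overflow flag and drops the pointer; the collector
 * later recovers the lost work with a linear scan of the old
 * generation.  Names and sizes here are illustrative.
 */
#if 0 /* illustrative sketch, never compiled */
static StgPtr  stack_[1024];
static StgPtr *sp_    = stack_;                 // grows upward
static StgPtr *splim_ = stack_ + 1024;
static int     overflowed_ = 0;

static void push_(StgPtr p)
{
    if (sp_ >= splim_) { overflowed_ = 1; return; }  // defer to linear scan
    *sp_++ = p;
}

static StgPtr pop_(void)                        // caller checks non-empty
{
    return *--sp_;
}
#endif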
200 /* -----------------------------------------------------------------------------
203 For garbage collecting generation N (and all younger generations):
205 - follow all pointers in the root set. the root set includes all
206 mutable objects in all steps in all generations.
208 - for each pointer, evacuate the object it points to into either
209 + to-space in the next higher step in that generation, if one exists,
210 + if the object's generation == N, then evacuate it to the next
211 generation if one exists, or else to-space in the current
213 + if the object's generation < N, then evacuate it to to-space
214 in the next generation.
216 - repeatedly scavenge to-space from each step in each generation
217 being collected until no more objects can be evacuated.
219 - free from-space in each step, and set from-space = to-space.
221 -------------------------------------------------------------------------- */
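
/* A minimal Cheney-style sketch (not the RTS code) of the
 * evacuate/scavenge loop just described, for a single space of
 * two-pointer objects.  Everything here is hypothetical; the collector
 * below generalises this over generations, steps and closure types.
 */
#if 0 /* illustrative sketch, never compiled */
typedef struct Cell_ {
    struct Cell_ *forward;         // non-NULL once evacuated
    struct Cell_ *fields[2];       // child pointers (may be NULL)
} Cell;

static Cell *to_space;             // assume: points at a large enough array
static Cell *alloc, *scan;

static Cell *evac(Cell *c)
{
    Cell *d;
    if (c == NULL) return NULL;
    if (c->forward != NULL) return c->forward;  // already copied
    d = alloc++;                   // bump-allocate in to-space
    *d = *c;                       // copy the object
    c->forward = d;                // leave a forwarding pointer behind
    return d;
}

static void collect(Cell **roots, int n_roots)
{
    int i;
    scan = alloc = to_space;
    for (i = 0; i < n_roots; i++) {
        roots[i] = evac(roots[i]); // follow all pointers in the root set
    }
    while (scan < alloc) {         // breadth-first scavenge until quiescent
        scan->fields[0] = evac(scan->fields[0]);
        scan->fields[1] = evac(scan->fields[1]);
        scan++;
    }
}
#endif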
224 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
228 lnat live, allocated, collected = 0, copied = 0;
229 lnat oldgen_saved_blocks = 0;
233 CostCentreStack *prev_CCS;
236 #if defined(DEBUG) && defined(GRAN)
237 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
241 // tell the stats department that we've started a GC
244 // Init stats and print par specific (timing) info
245 PAR_TICKY_PAR_START();
247 // attribute any costs to CCS_GC
253 /* Approximate how much we allocated.
254 * Todo: only when generating stats?
256 allocated = calcAllocated();
258 /* Figure out which generation to collect
260 if (force_major_gc) {
261 N = RtsFlags.GcFlags.generations - 1;
265 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
266 if (generations[g].steps[0].n_blocks +
267 generations[g].steps[0].n_large_blocks
268 >= generations[g].max_blocks) {
272 major_gc = (N == RtsFlags.GcFlags.generations-1);
275 #ifdef RTS_GTK_FRONTPANEL
276 if (RtsFlags.GcFlags.frontpanel) {
277 updateFrontPanelBeforeGC(N);
281 // check stack sanity *before* GC (ToDo: check all threads)
283 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
285 IF_DEBUG(sanity, checkFreeListSanity());
287 /* Initialise the static object lists
289 static_objects = END_OF_STATIC_LIST;
290 scavenged_static_objects = END_OF_STATIC_LIST;
292 /* zero the mutable list for the oldest generation (see comment by
293 * zero_mutable_list below).
296 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
299 /* Save the old to-space if we're doing a two-space collection
301 if (RtsFlags.GcFlags.generations == 1) {
302 old_to_blocks = g0s0->to_blocks;
303 g0s0->to_blocks = NULL;
306 /* Keep a count of how many new blocks we allocated during this GC
307 * (used for resizing the allocation area, later).
311 /* Initialise to-space in all the generations/steps that we're
314 for (g = 0; g <= N; g++) {
315 generations[g].mut_once_list = END_MUT_LIST;
316 generations[g].mut_list = END_MUT_LIST;
318 for (s = 0; s < generations[g].n_steps; s++) {
320 // generation 0, step 0 doesn't need to-space
321 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
325 /* Get a free block for to-space. Extra blocks will be chained on
329 stp = &generations[g].steps[s];
330 ASSERT(stp->gen_no == g);
331 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
335 bd->flags = BF_EVACUATED; // it's a to-space block
337 stp->hpLim = stp->hp + BLOCK_SIZE_W;
340 stp->n_to_blocks = 1;
341 stp->scan = bd->start;
343 stp->new_large_objects = NULL;
344 stp->scavenged_large_objects = NULL;
345 stp->n_scavenged_large_blocks = 0;
347 // mark the large objects as not evacuated yet
348 for (bd = stp->large_objects; bd; bd = bd->link) {
349 bd->flags = BF_LARGE;
352 // for a compacted step, we need to allocate the bitmap
353 if (stp->is_compacted) {
354 nat bitmap_size; // in bytes
355 bdescr *bitmap_bdescr;
358 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
360 if (bitmap_size > 0) {
361 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
363 stp->bitmap = bitmap_bdescr;
364 bitmap = bitmap_bdescr->start;
366 IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
367 bitmap_size, bitmap););
369 // don't forget to fill it with zeros!
370 memset(bitmap, 0, bitmap_size);
372 // for each block in this step, point to its bitmap from the
374 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
375 bd->u.bitmap = bitmap;
376 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
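	  /* The arithmetic above provides one mark bit per word of the
	   * step.  A hedged sketch of setting the bit for an address p
	   * (hypothetical helper; the real mark-bit operations live
	   * alongside the compactor):
	   */
#if 0 /* illustrative sketch, never compiled */
	  static void mark_word(StgPtr p, bdescr *bd)
	  {
	      nat offset = p - bd->start;               // word index in block
	      nat bits   = sizeof(W_) * BITS_PER_BYTE;  // bits per bitmap word
	      bd->u.bitmap[offset / bits] |= (W_)1 << (offset % bits);
	  }
#endif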
383 /* make sure the older generations have at least one block to
384 * allocate into (this makes things easier for copy(), see below).
386 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
387 for (s = 0; s < generations[g].n_steps; s++) {
388 stp = &generations[g].steps[s];
389 if (stp->hp_bd == NULL) {
390 ASSERT(stp->blocks == NULL);
395 bd->flags = 0; // *not* a to-space block or a large object
397 stp->hpLim = stp->hp + BLOCK_SIZE_W;
403 /* Set the scan pointer for older generations: remember we
404 * still have to scavenge objects that have been promoted. */
406 stp->scan_bd = stp->hp_bd;
407 stp->to_blocks = NULL;
408 stp->n_to_blocks = 0;
409 stp->new_large_objects = NULL;
410 stp->scavenged_large_objects = NULL;
411 stp->n_scavenged_large_blocks = 0;
415 /* Allocate a mark stack if we're doing a major collection.
418 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
419 mark_stack = (StgPtr *)mark_stack_bdescr->start;
420 mark_sp = mark_stack;
421 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
423 mark_stack_bdescr = NULL;
426 /* -----------------------------------------------------------------------
427 * follow all the roots that we know about:
428 * - mutable lists from each generation > N
429 * we want to *scavenge* these roots, not evacuate them: they're not
430 * going to move in this GC.
431 * Also: do them in reverse generation order. This is because we
432 * often want to promote objects that are pointed to by older
433 * generations early, so we don't have to repeatedly copy them.
434 * Doing the generations in reverse order ensures that we don't end
435 * up in the situation where we want to evac an object to gen 3 and
436 * it has already been evaced to gen 2.
440 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
441 generations[g].saved_mut_list = generations[g].mut_list;
442 generations[g].mut_list = END_MUT_LIST;
445 // Do the mut-once lists first
446 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
447 IF_PAR_DEBUG(verbose,
448 printMutOnceList(&generations[g]));
449 scavenge_mut_once_list(&generations[g]);
451 for (st = generations[g].n_steps-1; st >= 0; st--) {
452 scavenge(&generations[g].steps[st]);
456 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
457 IF_PAR_DEBUG(verbose,
458 printMutableList(&generations[g]));
459 scavenge_mutable_list(&generations[g]);
461 for (st = generations[g].n_steps-1; st >= 0; st--) {
462 scavenge(&generations[g].steps[st]);
467 /* follow roots from the CAF list (used by GHCi)
472 /* follow all the roots that the application knows about.
475 get_roots(mark_root);
478 /* And don't forget to mark the TSO if we got here direct from
480 /* Not needed in a seq version?
482 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
486 // Mark the entries in the GALA table of the parallel system
487 markLocalGAs(major_gc);
488 // Mark all entries on the list of pending fetches
489 markPendingFetches(major_gc);
492 /* Mark the weak pointer list, and prepare to detect dead weak
495 mark_weak_ptr_list(&weak_ptr_list);
496 old_weak_ptr_list = weak_ptr_list;
497 weak_ptr_list = NULL;
498 weak_done = rtsFalse;
500 /* The all_threads list is like the weak_ptr_list.
501 * See traverse_weak_ptr_list() for the details.
503 old_all_threads = all_threads;
504 all_threads = END_TSO_QUEUE;
505 resurrected_threads = END_TSO_QUEUE;
507 /* Mark the stable pointer table.
509 markStablePtrTable(mark_root);
513 /* ToDo: To fix the caf leak, we need to make the commented out
514 * parts of this code do something sensible - as described in
517 extern void markHugsObjects(void);
522 /* -------------------------------------------------------------------------
523 * Repeatedly scavenge all the areas we know about until there's no
524 * more scavenging to be done.
531 // scavenge static objects
532 if (major_gc && static_objects != END_OF_STATIC_LIST) {
533 IF_DEBUG(sanity, checkStaticObjects(static_objects));
537 /* When scavenging the older generations: Objects may have been
538 * evacuated from generations <= N into older generations, and we
539 * need to scavenge these objects. We're going to try to ensure that
540 * any evacuations that occur move the objects into at least the
541 * same generation as the object being scavenged, otherwise we
542 * have to create new entries on the mutable list for the older
546 // scavenge each step in generations 0..maxgen
552 // scavenge objects in compacted generation
553 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
554 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
555 scavenge_mark_stack();
559 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
560 for (st = generations[gen].n_steps; --st >= 0; ) {
561 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
564 stp = &generations[gen].steps[st];
566 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
571 if (stp->new_large_objects != NULL) {
580 if (flag) { goto loop; }
583 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
589 // Reconstruct the Global Address tables used in GUM
590 rebuildGAtables(major_gc);
591 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
594 // Now see which stable names are still alive.
597 // Tidy the end of the to-space chains
598 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
599 for (s = 0; s < generations[g].n_steps; s++) {
600 stp = &generations[g].steps[s];
601 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
602 stp->hp_bd->free = stp->hp;
603 stp->hp_bd->link = NULL;
609 // We call processHeapClosureForDead() on every closure destroyed during
610 // the current garbage collection, so we invoke LdvCensusForDead().
611 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV)
615 // NO MORE EVACUATION AFTER THIS POINT!
616 // Finally: compaction of the oldest generation.
617 if (major_gc && oldest_gen->steps[0].is_compacted) {
618 // save number of blocks for stats
619 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
623 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
625 /* run through all the generations/steps and tidy up
627 copied = new_blocks * BLOCK_SIZE_W;
628 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
631 generations[g].collections++; // for stats
634 for (s = 0; s < generations[g].n_steps; s++) {
636 stp = &generations[g].steps[s];
638 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
639 // stats information: how much we copied
641 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
646 // for generations we collected...
649 // rough calculation of garbage collected, for stats output
650 if (stp->is_compacted) {
651 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
653 collected += stp->n_blocks * BLOCK_SIZE_W;
656 /* free old memory and shift to-space into from-space for all
657 * the collected steps (except the allocation area). These
658 * freed blocks will probably be quickly recycled.
660 if (!(g == 0 && s == 0)) {
661 if (stp->is_compacted) {
662 // for a compacted step, just shift the new to-space
663 // onto the front of the now-compacted existing blocks.
664 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
665 bd->flags &= ~BF_EVACUATED; // now from-space
667 // tack the new blocks on the end of the existing blocks
668 if (stp->blocks == NULL) {
669 stp->blocks = stp->to_blocks;
671 for (bd = stp->blocks; bd != NULL; bd = next) {
674 bd->link = stp->to_blocks;
678 // add the new blocks to the block tally
679 stp->n_blocks += stp->n_to_blocks;
681 freeChain(stp->blocks);
682 stp->blocks = stp->to_blocks;
683 stp->n_blocks = stp->n_to_blocks;
684 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
685 bd->flags &= ~BF_EVACUATED; // now from-space
688 stp->to_blocks = NULL;
689 stp->n_to_blocks = 0;
692 /* LARGE OBJECTS. The current live large objects are chained on
693 * scavenged_large, having been moved during garbage
694 * collection from large_objects. Any objects left on
695 * large_objects list are therefore dead, so we free them here.
697 for (bd = stp->large_objects; bd != NULL; bd = next) {
703 // update the count of blocks used by large objects
704 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
705 bd->flags &= ~BF_EVACUATED;
707 stp->large_objects = stp->scavenged_large_objects;
708 stp->n_large_blocks = stp->n_scavenged_large_blocks;
711 // for older generations...
713 /* For older generations, we need to append the
714 * scavenged_large_object list (i.e. large objects that have been
715 * promoted during this GC) to the large_object list for that step.
717 for (bd = stp->scavenged_large_objects; bd; bd = next) {
719 bd->flags &= ~BF_EVACUATED;
720 dbl_link_onto(bd, &stp->large_objects);
723 // add the new blocks we promoted during this GC
724 stp->n_blocks += stp->n_to_blocks;
725 stp->n_large_blocks += stp->n_scavenged_large_blocks;
730 /* Reset the sizes of the older generations when we do a major
733 * CURRENT STRATEGY: make all generations except zero the same size.
734 * We have to stay within the maximum heap size, and leave a certain
735 * percentage of the maximum heap size available to allocate into.
737 if (major_gc && RtsFlags.GcFlags.generations > 1) {
738 nat live, size, min_alloc;
739 nat max = RtsFlags.GcFlags.maxHeapSize;
740 nat gens = RtsFlags.GcFlags.generations;
742 // live in the oldest generations
743 live = oldest_gen->steps[0].n_blocks +
744 oldest_gen->steps[0].n_large_blocks;
746 // default max size for all generations except zero
747 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
748 RtsFlags.GcFlags.minOldGenSize);
750 // minimum size for generation zero
751 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
752 RtsFlags.GcFlags.minAllocAreaSize);
754 // Auto-enable compaction when the residency reaches a
755 // certain percentage of the maximum heap size (default: 30%).
756 if (RtsFlags.GcFlags.generations > 1 &&
757 (RtsFlags.GcFlags.compact ||
759 oldest_gen->steps[0].n_blocks >
760 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
761 oldest_gen->steps[0].is_compacted = 1;
762 // fprintf(stderr,"compaction: on\n", live);
764 oldest_gen->steps[0].is_compacted = 0;
765 // fprintf(stderr,"compaction: off\n", live);
768 // if we're going to go over the maximum heap size, reduce the
769 // size of the generations accordingly. The calculation is
770 // different if compaction is turned on, because we don't need
771 // to double the space required to collect the old generation.
774 // this test is necessary to ensure that the calculations
775 // below don't have any negative results - we're working
776 // with unsigned values here.
777 if (max < min_alloc) {
781 if (oldest_gen->steps[0].is_compacted) {
782 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
783 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
786 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
787 size = (max - min_alloc) / ((gens - 1) * 2);
797 fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
798 min_alloc, size, max);
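
    /* Worked example of the clamping above (illustrative numbers): with
     * max = 10000 blocks, gens = 3 and min_alloc = 1000, the copying
     * case gives size = (10000 - 1000) / ((3-1)*2) = 2250 blocks per
     * old generation, while the compacted case gives
     * size = (10000 - 1000) / ((3-1)*2 - 1) = 3000 blocks, reflecting
     * that compaction needs no second copy of the oldest generation.
     */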
801 for (g = 0; g < gens; g++) {
802 generations[g].max_blocks = size;
806 // Guess the amount of live data for stats.
809 /* Free the small objects allocated via allocate(), since this will
810 * all have been copied into G0S1 now.
812 if (small_alloc_list != NULL) {
813 freeChain(small_alloc_list);
815 small_alloc_list = NULL;
819 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
821 // Start a new pinned_object_block
822 pinned_object_block = NULL;
824 /* Free the mark stack.
826 if (mark_stack_bdescr != NULL) {
827 freeGroup(mark_stack_bdescr);
832 for (g = 0; g <= N; g++) {
833 for (s = 0; s < generations[g].n_steps; s++) {
834 stp = &generations[g].steps[s];
835 if (stp->is_compacted && stp->bitmap != NULL) {
836 freeGroup(stp->bitmap);
841 /* Two-space collector:
842 * Free the old to-space, and estimate the amount of live data.
844 if (RtsFlags.GcFlags.generations == 1) {
847 if (old_to_blocks != NULL) {
848 freeChain(old_to_blocks);
850 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
851 bd->flags = 0; // now from-space
854 /* For a two-space collector, we need to resize the nursery. */
856 /* set up a new nursery. Allocate a nursery size based on a
857 * function of the amount of live data (by default a factor of 2)
858 * Use the blocks from the old nursery if possible, freeing up any
861 * If we get near the maximum heap size, then adjust our nursery
862 * size accordingly. If the nursery is the same size as the live
863 * data (L), then we need 3L bytes. We can reduce the size of the
864 * nursery to bring the required memory down near 2L bytes.
866 * A normal 2-space collector would need 4L bytes to give the same
867 * performance we get from 3L bytes, reducing to the same
868 * performance at 2L bytes.
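 *
 * Roughly where the 3L figure comes from: at collection time we hold
 * the L words of old live data, the copy being made of them (another
 * L), and the L-word nursery itself, i.e. 3L in total; a conventional
 * two-space collector with semispaces of 2L touches 4L for the same
 * headroom.  (An illustrative reading, not a precise accounting.)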
870 blocks = g0s0->n_to_blocks;
872 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
873 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
874 RtsFlags.GcFlags.maxHeapSize ) {
875 long adjusted_blocks; // signed on purpose
878 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
879 IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
880 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
881 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
884 blocks = adjusted_blocks;
887 blocks *= RtsFlags.GcFlags.oldGenFactor;
888 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
889 blocks = RtsFlags.GcFlags.minAllocAreaSize;
892 resizeNursery(blocks);
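
    /* A hedged sketch of the sizing above as a pure function
     * (hypothetical; the real code also does the maxHeapSize clamping
     * shown before it):
     */
#if 0 /* illustrative sketch, never compiled */
    static nat nursery_blocks( nat live_blocks, nat factor, nat min_area )
    {
        nat n = live_blocks * factor;   // e.g. factor 2: nursery = 2 * live
        if (n < min_area) { n = min_area; }
        return n;
    }
#endif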
895 /* Generational collector:
896 * If the user has given us a suggested heap size, adjust our
897 * allocation area to make best use of the memory available.
900 if (RtsFlags.GcFlags.heapSizeSuggestion) {
902 nat needed = calcNeeded(); // approx blocks needed at next GC
904 /* Guess how much will be live in generation 0 step 0 next time.
905 * A good approximation is obtained by finding the
906 * percentage of g0s0 that was live at the last minor GC.
909 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
912 /* Estimate a size for the allocation area based on the
913 * information available. We might end up going slightly under
914 * or over the suggested heap size, but we should be pretty
917 * Formula:            suggested - needed
918 *                ----------------------------
919 *                    1 + g0s0_pcnt_kept/100
921 * where 'needed' is the amount of memory needed at the next
922 * collection for collecting all steps except g0s0.
925 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
926 (100 + (long)g0s0_pcnt_kept);
928 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
929 blocks = RtsFlags.GcFlags.minAllocAreaSize;
932 resizeNursery((nat)blocks);
935 // we might have added extra large blocks to the nursery, so
936 // resize back to minAllocAreaSize again.
937 resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
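
    /* Worked example of the formula above (illustrative numbers): with
     * a suggestion of 4000 blocks, needed = 1000 and g0s0_pcnt_kept =
     * 25, blocks = (4000 - 1000) * 100 / (100 + 25) = 2400.  The check:
     * nursery (2400) + expected survivors (2400 * 25/100 = 600) +
     * needed (1000) = 4000, exactly the suggested heap size.
     */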
941 // mark the garbage collected CAFs as dead
942 #if 0 && defined(DEBUG) // doesn't work at the moment
943 if (major_gc) { gcCAFs(); }
947 // resetStaticObjectForRetainerProfiling() must be called before
949 resetStaticObjectForRetainerProfiling();
952 // zero the scavenged static object list
954 zero_static_object_list(scavenged_static_objects);
960 // start any pending finalizers
961 scheduleFinalizers(old_weak_ptr_list);
963 // send exceptions to any threads which were about to die
964 resurrectThreads(resurrected_threads);
966 // Update the stable pointer hash table.
967 updateStablePtrTable(major_gc);
969 // check sanity after GC
970 IF_DEBUG(sanity, checkSanity());
972 // extra GC trace info
973 IF_DEBUG(gc, statDescribeGens());
976 // symbol-table based profiling
977 /* heapCensus(to_blocks); */ /* ToDo */
980 // restore enclosing cost centre
985 // check for memory leaks if sanity checking is on
986 IF_DEBUG(sanity, memInventory());
988 #ifdef RTS_GTK_FRONTPANEL
989 if (RtsFlags.GcFlags.frontpanel) {
990 updateFrontPanelAfterGC( N, live );
994 // ok, GC over: tell the stats department what happened.
995 stat_endGC(allocated, collected, live, copied, N);
1001 /* -----------------------------------------------------------------------------
1004 traverse_weak_ptr_list is called possibly many times during garbage
1005 collection. It returns a flag indicating whether it did any work
1006 (i.e. called evacuate on any live pointers).
1008 Invariant: traverse_weak_ptr_list is called when the heap is in an
1009 idempotent state. That means that there are no pending
1010 evacuate/scavenge operations. This invariant helps the weak
1011 pointer code decide which weak pointers are dead - if there are no
1012 new live weak pointers, then all the currently unreachable ones are
1015 For generational GC: we just don't try to finalize weak pointers in
1016 older generations than the one we're collecting. This could
1017 probably be optimised by keeping per-generation lists of weak
1018 pointers, but for a few weak pointers this scheme will work.
1019 -------------------------------------------------------------------------- */
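
/* A minimal sketch (not the RTS code) of the fixed-point iteration
 * described above, with hypothetical is_alive()/keep_alive() standing
 * in for isAlive()/evacuate():
 */
#if 0 /* illustrative sketch, never compiled */
typedef struct Weak_ {
    void *key, *value;
    struct Weak_ *link;
} Weak;

extern int  is_alive  (void *p);    // hypothetical reachability test
extern void keep_alive(void *p);    // hypothetical evacuation

static int sweep_weaks(Weak **pending)  // non-zero if any work was done
{
    int progress = 0;
    Weak **wp = pending;
    while (*wp != NULL) {
        Weak *w = *wp;
        if (is_alive(w->key)) {     // key reachable: value must live too
            keep_alive(w->value);
            *wp = w->link;          // take w off the pending list
            progress = 1;
        } else {
            wp = &w->link;          // undecided: look again next pass
        }
    }
    // The caller alternates scavenging with calls to this function
    // until it returns 0; anything still pending then is dead.
    return progress;
}
#endif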
1022 traverse_weak_ptr_list(void)
1024 StgWeak *w, **last_w, *next_w;
1026 rtsBool flag = rtsFalse;
1028 if (weak_done) { return rtsFalse; }
1030 /* doesn't matter where we evacuate values/finalizers to, since
1031 * these pointers are treated as roots (iff the keys are alive).
1035 last_w = &old_weak_ptr_list;
1036 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1038 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1039 * called on a live weak pointer object. Just remove it.
1041 if (w->header.info == &stg_DEAD_WEAK_info) {
1042 next_w = ((StgDeadWeak *)w)->link;
1047 ASSERT(get_itbl(w)->type == WEAK);
1049 /* Now, check whether the key is reachable.
1051 new = isAlive(w->key);
1054 // evacuate the value and finalizer
1055 w->value = evacuate(w->value);
1056 w->finalizer = evacuate(w->finalizer);
1057 // remove this weak ptr from the old_weak_ptr list
1059 // and put it on the new weak ptr list
1061 w->link = weak_ptr_list;
1064 IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
1068 last_w = &(w->link);
1074 /* Now deal with the all_threads list, which behaves somewhat like
1075 * the weak ptr list. If we discover any threads that are about to
1076 * become garbage, we wake them up and administer an exception.
1079 StgTSO *t, *tmp, *next, **prev;
1081 prev = &old_all_threads;
1082 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1084 (StgClosure *)tmp = isAlive((StgClosure *)t);
1090 ASSERT(get_itbl(t)->type == TSO);
1091 switch (t->what_next) {
1092 case ThreadRelocated:
1097 case ThreadComplete:
1098 // finished or died. The thread might still be alive, but we
1099 // don't keep it on the all_threads list. Don't forget to
1100 // stub out its global_link field.
1101 next = t->global_link;
1102 t->global_link = END_TSO_QUEUE;
1110 // not alive (yet): leave this thread on the old_all_threads list.
1111 prev = &(t->global_link);
1112 next = t->global_link;
1115 // alive: move this thread onto the all_threads list.
1116 next = t->global_link;
1117 t->global_link = all_threads;
1124 /* If we didn't make any changes, then we can go round and kill all
1125 * the dead weak pointers. The old_weak_ptr list is used as a list
1126 * of pending finalizers later on.
1128 if (flag == rtsFalse) {
1129 for (w = old_weak_ptr_list; w; w = w->link) {
1130 w->finalizer = evacuate(w->finalizer);
1133 /* And resurrect any threads which were about to become garbage.
1136 StgTSO *t, *tmp, *next;
1137 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1138 next = t->global_link;
1139 (StgClosure *)tmp = evacuate((StgClosure *)t);
1140 tmp->global_link = resurrected_threads;
1141 resurrected_threads = tmp;
1145 weak_done = rtsTrue;
1151 /* -----------------------------------------------------------------------------
1152 After GC, the live weak pointer list may have forwarding pointers
1153 on it, because a weak pointer object was evacuated after being
1154 moved to the live weak pointer list. We remove those forwarding
1157 Also, we don't consider weak pointer objects to be reachable, but
1158 we must nevertheless consider them to be "live" and retain them.
1159 Therefore any weak pointer objects which haven't as yet been
1160 evacuated need to be evacuated now.
1161 -------------------------------------------------------------------------- */
1165 mark_weak_ptr_list ( StgWeak **list )
1167 StgWeak *w, **last_w;
1170 for (w = *list; w; w = w->link) {
1171 (StgClosure *)w = evacuate((StgClosure *)w);
1173 last_w = &(w->link);
1177 /* -----------------------------------------------------------------------------
1178 isAlive determines whether the given closure is still alive (after
1179 a garbage collection) or not. It returns the new address of the
1180 closure if it is alive, or NULL otherwise.
1182 NOTE: Use it before compaction only!
1183 -------------------------------------------------------------------------- */
1187 isAlive(StgClosure *p)
1189 const StgInfoTable *info;
1196 /* ToDo: for static closures, check the static link field.
1197 * Problem here is that we sometimes don't set the link field, eg.
1198 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1203 // ignore closures in generations that we're not collecting.
1204 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1207 // large objects have an evacuated flag
1208 if (bd->flags & BF_LARGE) {
1209 if (bd->flags & BF_EVACUATED) {
1215 // check the mark bit for compacted steps
1216 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1220 switch (info->type) {
1225 case IND_OLDGEN: // rely on compatible layout with StgInd
1226 case IND_OLDGEN_PERM:
1227 // follow indirections
1228 p = ((StgInd *)p)->indirectee;
1233 return ((StgEvacuated *)p)->evacuee;
1236 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1237 p = (StgClosure *)((StgTSO *)p)->link;
1249 mark_root(StgClosure **root)
1251 *root = evacuate(*root);
1257 bdescr *bd = allocBlock();
1258 bd->gen_no = stp->gen_no;
1261 if (stp->gen_no <= N) {
1262 bd->flags = BF_EVACUATED;
1267 stp->hp_bd->free = stp->hp;
1268 stp->hp_bd->link = bd;
1269 stp->hp = bd->start;
1270 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1277 static __inline__ void
1278 upd_evacuee(StgClosure *p, StgClosure *dest)
1280 p->header.info = &stg_EVACUATED_info;
1281 ((StgEvacuated *)p)->evacuee = dest;
1285 static __inline__ StgClosure *
1286 copy(StgClosure *src, nat size, step *stp)
1291 nat size_org = size;
1294 TICK_GC_WORDS_COPIED(size);
1295 /* Find out where we're going, using the handy "to" pointer in
1296 * the step of the source object. If it turns out we need to
1297 * evacuate to an older generation, adjust it here (see comment
1300 if (stp->gen_no < evac_gen) {
1301 #ifdef NO_EAGER_PROMOTION
1302 failed_to_evac = rtsTrue;
1304 stp = &generations[evac_gen].steps[0];
1308 /* chain a new block onto the to-space for the destination step if
1311 if (stp->hp + size >= stp->hpLim) {
1315 for(to = stp->hp, from = (P_)src; size>0; --size) {
1321 upd_evacuee(src,(StgClosure *)dest);
1324 // We store the size of the just evacuated object in the LDV word so that
1325 // the profiler can guess the position of the next object later.
1326 SET_EVACUAEE_FOR_LDV(src, size_org);
1328 return (StgClosure *)dest;
1331 /* Special version of copy() for when we only want to copy the info
1332 * pointer of an object, but reserve some padding after it. This is
1333 * used to optimise evacuation of BLACKHOLEs.
1338 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1343 nat size_to_copy_org = size_to_copy;
1346 TICK_GC_WORDS_COPIED(size_to_copy);
1347 if (stp->gen_no < evac_gen) {
1348 #ifdef NO_EAGER_PROMOTION
1349 failed_to_evac = rtsTrue;
1351 stp = &generations[evac_gen].steps[0];
1355 if (stp->hp + size_to_reserve >= stp->hpLim) {
1359 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1364 stp->hp += size_to_reserve;
1365 upd_evacuee(src,(StgClosure *)dest);
1368 // We store the size of the just evacuated object in the LDV word so that
1369 // the profiler can guess the position of the next object later.
1370 // size_to_copy_org is wrong because the closure already occupies size_to_reserve
1372 SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
1374 if (size_to_reserve - size_to_copy_org > 0)
1375 FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
1377 return (StgClosure *)dest;
1381 /* -----------------------------------------------------------------------------
1382 Evacuate a large object
1384 This just consists of removing the object from the (doubly-linked)
1385 large_objects list, and linking it on to the (singly-linked)
1386 new_large_objects list, from where it will be scavenged later.
1388 Convention: bd->flags has BF_EVACUATED set for a large object
1389 that has been evacuated, or unset otherwise.
1390 -------------------------------------------------------------------------- */
1394 evacuate_large(StgPtr p)
1396 bdescr *bd = Bdescr(p);
1399 // object must be at the beginning of the block (or be a ByteArray)
1400 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1401 (((W_)p & BLOCK_MASK) == 0));
1403 // already evacuated?
1404 if (bd->flags & BF_EVACUATED) {
1405 /* Don't forget to set the failed_to_evac flag if we didn't get
1406 * the desired destination (see comments in evacuate()).
1408 if (bd->gen_no < evac_gen) {
1409 failed_to_evac = rtsTrue;
1410 TICK_GC_FAILED_PROMOTION();
1416 // remove from large_object list
1418 bd->u.back->link = bd->link;
1419 } else { // first object in the list
1420 stp->large_objects = bd->link;
1423 bd->link->u.back = bd->u.back;
1426 /* link it on to the evacuated large object list of the destination step
1429 if (stp->gen_no < evac_gen) {
1430 #ifdef NO_EAGER_PROMOTION
1431 failed_to_evac = rtsTrue;
1433 stp = &generations[evac_gen].steps[0];
1438 bd->gen_no = stp->gen_no;
1439 bd->link = stp->new_large_objects;
1440 stp->new_large_objects = bd;
1441 bd->flags |= BF_EVACUATED;
1444 /* -----------------------------------------------------------------------------
1445 Adding a MUT_CONS to an older generation.
1447 This is necessary from time to time when we end up with an
1448 old-to-new generation pointer in a non-mutable object. We defer
1449 the promotion until the next GC.
1450 -------------------------------------------------------------------------- */
1454 mkMutCons(StgClosure *ptr, generation *gen)
1459 stp = &gen->steps[0];
1461 /* chain a new block onto the to-space for the destination step if
1464 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1468 q = (StgMutVar *)stp->hp;
1469 stp->hp += sizeofW(StgMutVar);
1471 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1473 recordOldToNewPtrs((StgMutClosure *)q);
1475 return (StgClosure *)q;
1478 /* -----------------------------------------------------------------------------
1481 This is called (eventually) for every live object in the system.
1483 The caller to evacuate specifies a desired generation in the
1484 evac_gen global variable. The following conditions apply to
1485 evacuating an object which resides in generation M when we're
1486 collecting up to generation N
1490 if M >= evac_gen: if M > N, do nothing; else evac to step->to
1492 if M < evac_gen:  evac to evac_gen, step 0
1494 if the object is already evacuated, then we check which generation it is now in:
1497 if M >= evac_gen do nothing
1498 if M < evac_gen set failed_to_evac flag to indicate that we
1499 didn't manage to evacuate this object into evac_gen.
1501 -------------------------------------------------------------------------- */
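
/* The decision table above as a hedged sketch (hypothetical helper,
 * assuming each step's `to' field names its destination step; the real
 * logic is folded into evacuate() and copy() below):
 */
#if 0 /* illustrative sketch, never compiled */
static step *choose_dest( step *obj_step )
{
    nat M = obj_step->gen_no;
    if (M > N)        return NULL;   // older than the collection: leave it
    if (M < evac_gen) return &generations[evac_gen].steps[0];
    return obj_step->to;             // evacuate to this step's to-space
}
#endif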
1504 evacuate(StgClosure *q)
1509 const StgInfoTable *info;
1512 if (HEAP_ALLOCED(q)) {
1515 // not a group head: find the group head
1516 if (bd->blocks == 0) { bd = bd->link; }
1518 if (bd->gen_no > N) {
1519 /* Can't evacuate this object, because it's in a generation
1520 * older than the ones we're collecting. Let's hope that it's
1521 * in evac_gen or older, or we will have to arrange to track
1522 * this pointer using the mutable list.
1524 if (bd->gen_no < evac_gen) {
1526 failed_to_evac = rtsTrue;
1527 TICK_GC_FAILED_PROMOTION();
1532 /* evacuate large objects by re-linking them onto a different list.
1534 if (bd->flags & BF_LARGE) {
1536 if (info->type == TSO &&
1537 ((StgTSO *)q)->what_next == ThreadRelocated) {
1538 q = (StgClosure *)((StgTSO *)q)->link;
1541 evacuate_large((P_)q);
1545 /* If the object is in a step that we're compacting, then we
1546 * need to use an alternative evacuate procedure.
1548 if (bd->step->is_compacted) {
1549 if (!is_marked((P_)q,bd)) {
1551 if (mark_stack_full()) {
1552 mark_stack_overflowed = rtsTrue;
1555 push_mark_stack((P_)q);
1563 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1566 // make sure the info pointer is into text space
1567 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1568 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1571 switch (info -> type) {
1575 to = copy(q,sizeW_fromITBL(info),stp);
1580 StgWord w = (StgWord)q->payload[0];
1581 if (q->header.info == Czh_con_info &&
1582 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1583 (StgChar)w <= MAX_CHARLIKE) {
1584 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1586 if (q->header.info == Izh_con_info &&
1587 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1588 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1590 // else, fall through ...
1596 return copy(q,sizeofW(StgHeader)+1,stp);
1598 case THUNK_1_0: // here because of MIN_UPD_SIZE
1603 #ifdef NO_PROMOTE_THUNKS
1604 if (bd->gen_no == 0 &&
1605 bd->step->no != 0 &&
1606 bd->step->no == generations[bd->gen_no].n_steps-1) {
1610 return copy(q,sizeofW(StgHeader)+2,stp);
1618 return copy(q,sizeofW(StgHeader)+2,stp);
1624 case IND_OLDGEN_PERM:
1629 return copy(q,sizeW_fromITBL(info),stp);
1632 case SE_CAF_BLACKHOLE:
1635 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1638 to = copy(q,BLACKHOLE_sizeW(),stp);
1641 case THUNK_SELECTOR:
1643 const StgInfoTable* selectee_info;
1644 StgClosure* selectee = ((StgSelector*)q)->selectee;
1647 selectee_info = get_itbl(selectee);
1648 switch (selectee_info->type) {
1656 case CONSTR_NOCAF_STATIC:
1658 StgWord offset = info->layout.selector_offset;
1660 // check that the size is in range
1662 (StgWord32)(selectee_info->layout.payload.ptrs +
1663 selectee_info->layout.payload.nptrs));
1665 // perform the selection!
1666 q = selectee->payload[offset];
1668 /* if we're already in to-space, there's no need to continue
1669 * with the evacuation, just update the source address with
1670 * a pointer to the (evacuated) constructor field.
1672 if (HEAP_ALLOCED(q)) {
1673 bdescr *bd = Bdescr((P_)q);
1674 if (bd->flags & BF_EVACUATED) {
1675 if (bd->gen_no < evac_gen) {
1676 failed_to_evac = rtsTrue;
1677 TICK_GC_FAILED_PROMOTION();
1683 /* otherwise, carry on and evacuate this constructor field,
1684 * (but not the constructor itself)
1693 case IND_OLDGEN_PERM:
1694 selectee = ((StgInd *)selectee)->indirectee;
1698 selectee = ((StgEvacuated *)selectee)->evacuee;
1701 case THUNK_SELECTOR:
1703 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1704 something) to go into an infinite loop when the nightly
1705 stage2 compiles PrelTup.lhs. */
1707 /* we can't recurse indefinitely in evacuate(), so set a
1708 * limit on the number of times we can go around this
1711 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1713 bd = Bdescr((P_)selectee);
1714 if (!(bd->flags & BF_EVACUATED)) { // i.e. not yet evacuated
1715 thunk_selector_depth++;
1716 selectee = evacuate(selectee);
1717 thunk_selector_depth--;
1721 // otherwise, fall through...
1733 case SE_CAF_BLACKHOLE:
1737 // not evaluated yet
1741 // a copy of the top-level cases below
1742 case RBH: // cf. BLACKHOLE_BQ
1744 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1745 to = copy(q,BLACKHOLE_sizeW(),stp);
1746 //ToDo: derive size etc from reverted IP
1747 //to = copy(q,size,stp);
1748 // recordMutable((StgMutClosure *)to);
1753 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1754 to = copy(q,sizeofW(StgBlockedFetch),stp);
1761 ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1762 to = copy(q,sizeofW(StgFetchMe),stp);
1766 ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1767 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1772 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1773 (int)(selectee_info->type));
1776 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1780 // follow chains of indirections, don't evacuate them
1781 q = ((StgInd*)q)->indirectee;
1785 if (info->srt_len > 0 && major_gc &&
1786 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1787 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1788 static_objects = (StgClosure *)q;
1793 if (info->srt_len > 0 && major_gc &&
1794 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1795 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1796 static_objects = (StgClosure *)q;
1801 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1802 * on the CAF list, so don't do anything with it here (we'll
1803 * scavenge it later).
1806 && ((StgIndStatic *)q)->saved_info == NULL
1807 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1808 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1809 static_objects = (StgClosure *)q;
1814 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1815 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1816 static_objects = (StgClosure *)q;
1820 case CONSTR_INTLIKE:
1821 case CONSTR_CHARLIKE:
1822 case CONSTR_NOCAF_STATIC:
1823 /* no need to put these on the static linked list, they don't need to be scavenged.
1838 // shouldn't see these
1839 barf("evacuate: stack frame at %p\n", q);
1843 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1844 * of stack, tagging and all.
1846 return copy(q,pap_sizeW((StgPAP*)q),stp);
1849 /* Already evacuated, just return the forwarding address.
1850 * HOWEVER: if the requested destination generation (evac_gen) is
1851 * older than the actual generation (because the object was
1852 * already evacuated to a younger generation) then we have to
1853 * set the failed_to_evac flag to indicate that we couldn't
1854 * manage to promote the object to the desired generation.
1856 if (evac_gen > 0) { // optimisation
1857 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1858 if (Bdescr((P_)p)->gen_no < evac_gen) {
1859 failed_to_evac = rtsTrue;
1860 TICK_GC_FAILED_PROMOTION();
1863 return ((StgEvacuated*)q)->evacuee;
1866 // just copy the block
1867 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1870 case MUT_ARR_PTRS_FROZEN:
1871 // just copy the block
1872 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1876 StgTSO *tso = (StgTSO *)q;
1878 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1880 if (tso->what_next == ThreadRelocated) {
1881 q = (StgClosure *)tso->link;
1885 /* To evacuate a small TSO, we need to relocate the update frame
1889 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1890 move_TSO(tso, new_tso);
1891 return (StgClosure *)new_tso;
1896 case RBH: // cf. BLACKHOLE_BQ
1898 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1899 to = copy(q,BLACKHOLE_sizeW(),stp);
1900 //ToDo: derive size etc from reverted IP
1901 //to = copy(q,size,stp);
1903 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1904 q, info_type(q), to, info_type(to)));
1909 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1910 to = copy(q,sizeofW(StgBlockedFetch),stp);
1912 belch("@@ evacuate: %p (%s) to %p (%s)",
1913 q, info_type(q), to, info_type(to)));
1920 ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1921 to = copy(q,sizeofW(StgFetchMe),stp);
1923 belch("@@ evacuate: %p (%s) to %p (%s)",
1924 q, info_type(q), to, info_type(to)));
1928 ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1929 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1931 belch("@@ evacuate: %p (%s) to %p (%s)",
1932 q, info_type(q), to, info_type(to)));
1937 barf("evacuate: strange closure type %d", (int)(info->type));
1943 /* -----------------------------------------------------------------------------
1944 move_TSO is called to update the TSO structure after it has been
1945 moved from one place to another.
1946 -------------------------------------------------------------------------- */
1949 move_TSO(StgTSO *src, StgTSO *dest)
1953 // relocate the stack pointers...
1954 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1955 dest->sp = (StgPtr)dest->sp + diff;
1956 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1958 relocate_stack(dest, diff);
1961 /* -----------------------------------------------------------------------------
1962 relocate_stack is called to update the linkage between
1963 UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
1965 -------------------------------------------------------------------------- */
1968 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1976 while ((P_)su < dest->stack + dest->stack_size) {
1977 switch (get_itbl(su)->type) {
1979 // GCC actually manages to common up these three cases!
1982 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1987 cf = (StgCatchFrame *)su;
1988 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1993 sf = (StgSeqFrame *)su;
1994 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
2003 barf("relocate_stack %d", (int)(get_itbl(su)->type));
2014 scavenge_srt(const StgInfoTable *info)
2016 StgClosure **srt, **srt_end;
2018 /* evacuate the SRT. If srt_len is zero, then there isn't an
2019 * srt field in the info table. That's ok, because we'll
2020 * never dereference it.
2022 srt = (StgClosure **)(info->srt);
2023 srt_end = srt + info->srt_len;
2024 for (; srt < srt_end; srt++) {
2025 /* Special-case to handle references to closures hiding out in DLLs, since
2026 double indirections are required to get at those. The code generator knows
2027 which is which when generating the SRT, so it stores the (indirect)
2028 reference to the DLL closure in the table by first adding one to it.
2029 We check for this here, and undo the addition before evacuating it.
2031 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2032 closure that's fixed at link-time, and no extra magic is required.
2034 #ifdef ENABLE_WIN32_DLL_SUPPORT
2035 if ( (unsigned long)(*srt) & 0x1 ) {
2036 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2046 /* -----------------------------------------------------------------------------
2048 -------------------------------------------------------------------------- */
2051 scavengeTSO (StgTSO *tso)
2053 // chase the link field for any TSOs on the same queue
2054 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2055 if ( tso->why_blocked == BlockedOnMVar
2056 || tso->why_blocked == BlockedOnBlackHole
2057 || tso->why_blocked == BlockedOnException
2059 || tso->why_blocked == BlockedOnGA
2060 || tso->why_blocked == BlockedOnGA_NoSend
2063 tso->block_info.closure = evacuate(tso->block_info.closure);
2065 if ( tso->blocked_exceptions != NULL ) {
2066 tso->blocked_exceptions =
2067 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2069 // scavenge this thread's stack
2070 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2073 /* -----------------------------------------------------------------------------
2074 Scavenge a given step until there are no more objects in this step
2077 evac_gen is set by the caller to be either zero (for a step in a
2078 generation < N) or G where G is the generation of the step being
2081 We sometimes temporarily change evac_gen back to zero if we're
2082 scavenging a mutable object where early promotion isn't such a good
2084 -------------------------------------------------------------------------- */
2092 nat saved_evac_gen = evac_gen;
2097 failed_to_evac = rtsFalse;
2099 /* scavenge phase - standard breadth-first scavenging of the
2103 while (bd != stp->hp_bd || p < stp->hp) {
2105 // If we're at the end of this block, move on to the next block
2106 if (bd != stp->hp_bd && p == bd->free) {
2112 info = get_itbl((StgClosure *)p);
2113 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2116 switch (info->type) {
2119 /* treat MVars specially, because we don't want to evacuate the
2120 * mut_link field in the middle of the closure.
2123 StgMVar *mvar = ((StgMVar *)p);
2125 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2126 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2127 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2128 evac_gen = saved_evac_gen;
2129 recordMutable((StgMutClosure *)mvar);
2130 failed_to_evac = rtsFalse; // mutable.
2131 p += sizeofW(StgMVar);
2139 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2140 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2141 p += sizeofW(StgHeader) + 2;
2146 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2147 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2153 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2154 p += sizeofW(StgHeader) + 1;
2159 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2165 p += sizeofW(StgHeader) + 1;
2172 p += sizeofW(StgHeader) + 2;
2179 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2180 p += sizeofW(StgHeader) + 2;
2196 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2197 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2198 (StgClosure *)*p = evacuate((StgClosure *)*p);
2200 p += info->layout.payload.nptrs;
2205 if (stp->gen->no != 0) {
2208 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2209 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2210 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2213 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
2215 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2218 // We pretend that p has just been created.
2219 LDV_recordCreate((StgClosure *)p);
2223 case IND_OLDGEN_PERM:
2224 ((StgIndOldGen *)p)->indirectee =
2225 evacuate(((StgIndOldGen *)p)->indirectee);
2226 if (failed_to_evac) {
2227 failed_to_evac = rtsFalse;
2228 recordOldToNewPtrs((StgMutClosure *)p);
2230 p += sizeofW(StgIndOldGen);
2235 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2236 evac_gen = saved_evac_gen;
2237 recordMutable((StgMutClosure *)p);
2238 failed_to_evac = rtsFalse; // mutable anyhow
2239 p += sizeofW(StgMutVar);
2244 failed_to_evac = rtsFalse; // mutable anyhow
2245 p += sizeofW(StgMutVar);
2249 case SE_CAF_BLACKHOLE:
2252 p += BLACKHOLE_sizeW();
2257 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2258 (StgClosure *)bh->blocking_queue =
2259 evacuate((StgClosure *)bh->blocking_queue);
2260 recordMutable((StgMutClosure *)bh);
2261 failed_to_evac = rtsFalse;
2262 p += BLACKHOLE_sizeW();
2266 case THUNK_SELECTOR:
2268 StgSelector *s = (StgSelector *)p;
2269 s->selectee = evacuate(s->selectee);
2270 p += THUNK_SELECTOR_sizeW();
2274 case AP_UPD: // same as PAPs
2276 /* Treat a PAP just like a section of stack, not forgetting to
2277 * evacuate the function pointer too...
2280 StgPAP* pap = (StgPAP *)p;
2282 pap->fun = evacuate(pap->fun);
2283 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2284 p += pap_sizeW(pap);
2289 // nothing to follow
2290 p += arr_words_sizeW((StgArrWords *)p);
2294 // follow everything
2298 evac_gen = 0; // repeatedly mutable
2299 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2300 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2301 (StgClosure *)*p = evacuate((StgClosure *)*p);
2303 evac_gen = saved_evac_gen;
2304 recordMutable((StgMutClosure *)q);
2305 failed_to_evac = rtsFalse; // mutable anyhow.
2309 case MUT_ARR_PTRS_FROZEN:
2310 // follow everything
2314 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2315 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2316 (StgClosure *)*p = evacuate((StgClosure *)*p);
2318 // it's tempting to recordMutable() if failed_to_evac is
2319 // false, but that breaks some assumptions (eg. every
2320 // closure on the mutable list is supposed to have the MUT
2321 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2327 StgTSO *tso = (StgTSO *)p;
2330 evac_gen = saved_evac_gen;
2331 recordMutable((StgMutClosure *)tso);
2332 failed_to_evac = rtsFalse; // mutable anyhow.
2333 p += tso_sizeW(tso);
2338 case RBH: // cf. BLACKHOLE_BQ
2341 nat size, ptrs, nonptrs, vhs;
2343 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2345 StgRBH *rbh = (StgRBH *)p;
2346 (StgClosure *)rbh->blocking_queue =
2347 evacuate((StgClosure *)rbh->blocking_queue);
2348 recordMutable((StgMutClosure *)rbh);
2349 failed_to_evac = rtsFalse; // mutable anyhow.
2351 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2352 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2353 // ToDo: use size of reverted closure here!
2354 p += BLACKHOLE_sizeW();
2360 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2361 // follow the pointer to the node which is being demanded
2362 (StgClosure *)bf->node =
2363 evacuate((StgClosure *)bf->node);
2364 // follow the link to the rest of the blocking queue
2365 (StgClosure *)bf->link =
2366 evacuate((StgClosure *)bf->link);
2367 if (failed_to_evac) {
2368 failed_to_evac = rtsFalse;
2369 recordMutable((StgMutClosure *)bf);
2372 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2373 bf, info_type((StgClosure *)bf),
2374 bf->node, info_type(bf->node)));
2375 p += sizeofW(StgBlockedFetch);
2383 p += sizeofW(StgFetchMe);
2384 break; // nothing to do in this case
2386 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2388 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2389 (StgClosure *)fmbq->blocking_queue =
2390 evacuate((StgClosure *)fmbq->blocking_queue);
2391 if (failed_to_evac) {
2392 failed_to_evac = rtsFalse;
2393 recordMutable((StgMutClosure *)fmbq);
2396 belch("@@ scavenge: %p (%s) exciting, isn't it",
2397 p, info_type((StgClosure *)p)));
2398 p += sizeofW(StgFetchMeBlockingQueue);
2404 barf("scavenge: unimplemented/strange closure type %d @ %p",
2408 /* If we didn't manage to promote all the objects pointed to by
2409 * the current object, then we have to designate this object as
2410 * mutable (because it contains old-to-new generation pointers).
2412 if (failed_to_evac) {
2413 failed_to_evac = rtsFalse;
2414 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2422 /* -----------------------------------------------------------------------------
2423 Scavenge everything on the mark stack.
2425 This is slightly different from scavenge():
2426 - we don't walk linearly through the objects, so the scavenger
2427 doesn't need to advance the pointer on to the next object.
2428 -------------------------------------------------------------------------- */
2431 scavenge_mark_stack(void)
2437 evac_gen = oldest_gen->no;
2438 saved_evac_gen = evac_gen;
2441 while (!mark_stack_empty()) {
2442 p = pop_mark_stack();
2444 info = get_itbl((StgClosure *)p);
2445 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2448 switch (info->type) {
2451 /* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
2455 StgMVar *mvar = ((StgMVar *)p);
2457 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2458 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2459 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2460 evac_gen = saved_evac_gen;
2461 failed_to_evac = rtsFalse; // mutable.
2469 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2470 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2480 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2505 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2506 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2507 (StgClosure *)*p = evacuate((StgClosure *)*p);
2513 // don't need to do anything here: the only possible case
2514 // is that we're in a 1-space compacting collector, with
2515 // no "old" generation.
2519 case IND_OLDGEN_PERM:
2520 ((StgIndOldGen *)p)->indirectee =
2521 evacuate(((StgIndOldGen *)p)->indirectee);
2522 if (failed_to_evac) {
2523 recordOldToNewPtrs((StgMutClosure *)p);
2525 failed_to_evac = rtsFalse;
2530 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2531 evac_gen = saved_evac_gen;
2532 failed_to_evac = rtsFalse;
2537 failed_to_evac = rtsFalse;
2541 case SE_CAF_BLACKHOLE:
2549 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2550 (StgClosure *)bh->blocking_queue =
2551 evacuate((StgClosure *)bh->blocking_queue);
2552 failed_to_evac = rtsFalse;
2556 case THUNK_SELECTOR:
2558 StgSelector *s = (StgSelector *)p;
2559 s->selectee = evacuate(s->selectee);
2563 case AP_UPD: // same as PAPs
2565 /* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
2569 StgPAP* pap = (StgPAP *)p;
2571 pap->fun = evacuate(pap->fun);
2572 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2577 // follow everything
2581 evac_gen = 0; // repeatedly mutable
2582 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2583 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2584 (StgClosure *)*p = evacuate((StgClosure *)*p);
2586 evac_gen = saved_evac_gen;
2587 failed_to_evac = rtsFalse; // mutable anyhow.
2591 case MUT_ARR_PTRS_FROZEN:
2592 // follow everything
2596 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2597 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2598 (StgClosure *)*p = evacuate((StgClosure *)*p);
2605 StgTSO *tso = (StgTSO *)p;
2608 evac_gen = saved_evac_gen;
2609 failed_to_evac = rtsFalse;
2614 case RBH: // cf. BLACKHOLE_BQ
	nat size, ptrs, nonptrs, vhs;
	char str[80];		// scratch buffer for get_closure_info()
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2621 StgRBH *rbh = (StgRBH *)p;
2622 (StgClosure *)rbh->blocking_queue =
2623 evacuate((StgClosure *)rbh->blocking_queue);
2624 recordMutable((StgMutClosure *)rbh);
2625 failed_to_evac = rtsFalse; // mutable anyhow.
2627 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		 p, info_type((StgClosure *)p), (StgClosure *)rbh->blocking_queue));
2634 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2635 // follow the pointer to the node which is being demanded
2636 (StgClosure *)bf->node =
2637 evacuate((StgClosure *)bf->node);
2638 // follow the link to the rest of the blocking queue
2639 (StgClosure *)bf->link =
2640 evacuate((StgClosure *)bf->link);
2641 if (failed_to_evac) {
2642 failed_to_evac = rtsFalse;
2643 recordMutable((StgMutClosure *)bf);
	       belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2647 bf, info_type((StgClosure *)bf),
2648 bf->node, info_type(bf->node)));
2656 break; // nothing to do in this case
2658 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2660 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2661 (StgClosure *)fmbq->blocking_queue =
2662 evacuate((StgClosure *)fmbq->blocking_queue);
2663 if (failed_to_evac) {
2664 failed_to_evac = rtsFalse;
2665 recordMutable((StgMutClosure *)fmbq);
2668 belch("@@ scavenge: %p (%s) exciting, isn't it",
2669 p, info_type((StgClosure *)p)));
2675 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2679 if (failed_to_evac) {
2680 failed_to_evac = rtsFalse;
2681 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2684 // mark the next bit to indicate "scavenged"
2685 mark(q+1, Bdescr(q));
2687 } // while (!mark_stack_empty())
2689 // start a new linear scan if the mark stack overflowed at some point
2690 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2691 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2692 mark_stack_overflowed = rtsFalse;
2693 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2694 oldgen_scan = oldgen_scan_bd->start;
2697 if (oldgen_scan_bd) {
2698 // push a new thing on the mark stack
	// find a closure that is marked but not scavenged, and start
	// scavenging it
2702 while (oldgen_scan < oldgen_scan_bd->free
2703 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2707 if (oldgen_scan < oldgen_scan_bd->free) {
2709 // already scavenged?
2710 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2711 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2714 push_mark_stack(oldgen_scan);
2715 // ToDo: bump the linear scan by the actual size of the object
2716 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2720 oldgen_scan_bd = oldgen_scan_bd->link;
2721 if (oldgen_scan_bd != NULL) {
2722 oldgen_scan = oldgen_scan_bd->start;
2728 /* -----------------------------------------------------------------------------
2729 Scavenge one object.
2731 This is used for objects that are temporarily marked as mutable
2732 because they contain old-to-new generation pointers. Only certain
2733 objects can have this property.
2734 -------------------------------------------------------------------------- */
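/* Returns rtsTrue if some pointee could not be evacuated to the
 * desired generation, i.e. the object still contains old-to-new
 * pointers and the caller must keep it on a mutable list; this is
 * how scavenge_mut_once_list() and scavenge_large() use it.
 */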
2737 scavenge_one(StgPtr p)
2739 const StgInfoTable *info;
2740 nat saved_evac_gen = evac_gen;
2743 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2744 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2746 info = get_itbl((StgClosure *)p);
2748 switch (info->type) {
2751 case FUN_1_0: // hardly worth specialising these guys
2771 case IND_OLDGEN_PERM:
2775 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2776 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2777 (StgClosure *)*q = evacuate((StgClosure *)*q);
2783 case SE_CAF_BLACKHOLE:
2788 case THUNK_SELECTOR:
2790 StgSelector *s = (StgSelector *)p;
2791 s->selectee = evacuate(s->selectee);
2796 // nothing to follow
2801 // follow everything
2804 evac_gen = 0; // repeatedly mutable
2805 recordMutable((StgMutClosure *)p);
2806 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2807 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2808 (StgClosure *)*p = evacuate((StgClosure *)*p);
2810 evac_gen = saved_evac_gen;
2811 failed_to_evac = rtsFalse;
2815 case MUT_ARR_PTRS_FROZEN:
2817 // follow everything
2820 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2821 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2822 (StgClosure *)*p = evacuate((StgClosure *)*p);
2829 StgTSO *tso = (StgTSO *)p;
2831 evac_gen = 0; // repeatedly mutable
2833 recordMutable((StgMutClosure *)tso);
2834 evac_gen = saved_evac_gen;
2835 failed_to_evac = rtsFalse;
2842 StgPAP* pap = (StgPAP *)p;
2843 pap->fun = evacuate(pap->fun);
2844 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      // This might happen if, for instance, a MUT_CONS was pointing to a
      // THUNK which has since been updated.  The IND_OLDGEN will
      // be on the mutable list anyway, so we don't need to do anything here.
2856 barf("scavenge_one: strange object %d", (int)(info->type));
2859 no_luck = failed_to_evac;
2860 failed_to_evac = rtsFalse;
2864 /* -----------------------------------------------------------------------------
2865 Scavenging mutable lists.
2867 We treat the mutable list of each generation > N (i.e. all the
2868 generations older than the one being collected) as roots. We also
2869 remove non-mutable objects from the mutable list at this point.
2870 -------------------------------------------------------------------------- */
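/* Both lists are threaded through the objects' own mut_link fields:
 *
 *	gen->mut_once_list -> p -> p' -> ... -> END_MUT_LIST
 *
 * mut_once_list carries write-once objects (IND_OLDGEN, MUT_CONS)
 * which can be dropped as soon as their pointee has been promoted,
 * while mut_list carries genuinely mutable objects (MUT_ARR_PTRS,
 * MUT_VAR, MVAR, TSO, ...) which may stay on the list indefinitely.
 */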
2873 scavenge_mut_once_list(generation *gen)
2875 const StgInfoTable *info;
2876 StgMutClosure *p, *next, *new_list;
2878 p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
2883 failed_to_evac = rtsFalse;
2885 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2887 // make sure the info pointer is into text space
2888 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2889 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    info = get_itbl(p);
    if (info->type==RBH)
2894 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2896 switch(info->type) {
2899 case IND_OLDGEN_PERM:
2901 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
2904 ((StgIndOldGen *)p)->indirectee =
2905 evacuate(((StgIndOldGen *)p)->indirectee);
2907 #if 0 && defined(DEBUG)
2908 if (RtsFlags.DebugFlags.gc)
	/* Debugging code to print out the size of the thing we just promoted */
2913 StgPtr start = gen->steps[0].scan;
2914 bdescr *start_bd = gen->steps[0].scan_bd;
2916 scavenge(&gen->steps[0]);
2917 if (start_bd != gen->steps[0].scan_bd) {
2918 size += (P_)BLOCK_ROUND_UP(start) - start;
2919 start_bd = start_bd->link;
2920 while (start_bd != gen->steps[0].scan_bd) {
2921 size += BLOCK_SIZE_W;
2922 start_bd = start_bd->link;
2924 size += gen->steps[0].scan -
2925 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2927 size = gen->steps[0].scan - start;
2929 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2933 /* failed_to_evac might happen if we've got more than two
2934 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is in
       * generation 1.
       */
2938 if (failed_to_evac) {
2939 failed_to_evac = rtsFalse;
2940 p->mut_link = new_list;
2943 /* the mut_link field of an IND_STATIC is overloaded as the
2944 * static link field too (it just so happens that we don't need
2945 * both at the same time), so we need to NULL it out when
2946 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
	/* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2956 * it from the mutable list if possible by promoting whatever it
2959 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2960 /* didn't manage to promote everything, so put the
2961 * MUT_CONS back on the list.
2963 p->mut_link = new_list;
2969 // shouldn't have anything else on the mutables list
2970 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2974 gen->mut_once_list = new_list;
2979 scavenge_mutable_list(generation *gen)
2981 const StgInfoTable *info;
2982 StgMutClosure *p, *next;
  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
2988 failed_to_evac = rtsFalse;
2990 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2992 // make sure the info pointer is into text space
2993 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2994 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    info = get_itbl(p);
    if (info->type==RBH)
2999 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3001 switch(info->type) {
3004 // follow everything
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
3010 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3011 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3012 (StgClosure *)*q = evacuate((StgClosure *)*q);
3017 // Happens if a MUT_ARR_PTRS in the old generation is frozen
3018 case MUT_ARR_PTRS_FROZEN:
3023 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3024 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3025 (StgClosure *)*q = evacuate((StgClosure *)*q);
3029 if (failed_to_evac) {
3030 failed_to_evac = rtsFalse;
3031 mkMutCons((StgClosure *)p, gen);
3037 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
3044 StgMVar *mvar = (StgMVar *)p;
3045 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
3046 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
3047 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
3055 StgTSO *tso = (StgTSO *)p;
3059 /* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
3063 tso->mut_link = gen->mut_list;
3064 gen->mut_list = (StgMutClosure *)tso;
3070 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3071 (StgClosure *)bh->blocking_queue =
3072 evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
3078 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3081 case IND_OLDGEN_PERM:
3082 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
3086 ((StgIndOldGen *)p)->indirectee =
3087 evacuate(((StgIndOldGen *)p)->indirectee);
3090 if (failed_to_evac) {
3091 failed_to_evac = rtsFalse;
3092 p->mut_link = gen->mut_once_list;
3093 gen->mut_once_list = p;
3100 // HWL: check whether all of these are necessary
3102 case RBH: // cf. BLACKHOLE_BQ
3104 // nat size, ptrs, nonptrs, vhs;
3106 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3107 StgRBH *rbh = (StgRBH *)p;
3108 (StgClosure *)rbh->blocking_queue =
3109 evacuate((StgClosure *)rbh->blocking_queue);
3110 if (failed_to_evac) {
3111 failed_to_evac = rtsFalse;
3112 recordMutable((StgMutClosure *)rbh);
3114 // ToDo: use size of reverted closure here!
3115 p += BLACKHOLE_sizeW();
3121 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3122 // follow the pointer to the node which is being demanded
3123 (StgClosure *)bf->node =
3124 evacuate((StgClosure *)bf->node);
3125 // follow the link to the rest of the blocking queue
3126 (StgClosure *)bf->link =
3127 evacuate((StgClosure *)bf->link);
3128 if (failed_to_evac) {
3129 failed_to_evac = rtsFalse;
3130 recordMutable((StgMutClosure *)bf);
3132 p += sizeofW(StgBlockedFetch);
3138 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3141 p += sizeofW(StgFetchMe);
3142 break; // nothing to do in this case
3144 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3146 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3147 (StgClosure *)fmbq->blocking_queue =
3148 evacuate((StgClosure *)fmbq->blocking_queue);
3149 if (failed_to_evac) {
3150 failed_to_evac = rtsFalse;
3151 recordMutable((StgMutClosure *)fmbq);
3153 p += sizeofW(StgFetchMeBlockingQueue);
3159 // shouldn't have anything else on the mutables list
3160 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3167 scavenge_static(void)
3169 StgClosure* p = static_objects;
3170 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects */
3174 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
     list... */
3178 while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);
    if (info->type==RBH)
3183 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3185 // make sure the info pointer is into text space
3186 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3187 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3189 /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
3192 static_objects = STATIC_LINK(info,p);
3193 STATIC_LINK(info,p) = scavenged_static_objects;
3194 scavenged_static_objects = p;
3196 switch (info -> type) {
3200 StgInd *ind = (StgInd *)p;
3201 ind->indirectee = evacuate(ind->indirectee);
3203 /* might fail to evacuate it, in which case we have to pop it
3204 * back on the mutable list (and take it off the
3205 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
3208 if (failed_to_evac) {
3209 failed_to_evac = rtsFalse;
3210 scavenged_static_objects = IND_STATIC_LINK(p);
3211 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3212 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3226 next = (P_)p->payload + info->layout.payload.ptrs;
3227 // evacuate the pointers
3228 for (q = (P_)p->payload; q < next; q++) {
3229 (StgClosure *)*q = evacuate((StgClosure *)*q);
3235 barf("scavenge_static: strange closure %d", (int)(info->type));
3238 ASSERT(failed_to_evac == rtsFalse);
3240 /* get the next static object from the list. Remember, there might
3241 * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
3248 /* -----------------------------------------------------------------------------
3249 scavenge_stack walks over a section of stack and evacuates all the
3250 objects pointed to by it. We can use the same code for walking
3251 PAPs, since these are just sections of copied stack.
3252 -------------------------------------------------------------------------- */
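/* Bitmap convention used below: a 0 bit means the corresponding
 * stack word is a pointer (and must be evacuated), a 1 bit means a
 * non-pointer.  Small bitmaps fit in a single word; large bitmaps
 * are arrays of words, walked BITS_IN(W_) stack words at a time.
 */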
3255 scavenge_stack(StgPtr p, StgPtr stack_end)
3258 const StgInfoTable* info;
3261 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
3269 while (p < stack_end) {
    q = *(StgPtr *)p;

    // If we've got a tag, skip over that many words on the stack
3273 if (IS_ARG_TAG((W_)q)) {
    /* Is q a pointer to a closure? */
3280 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3282 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3283 ASSERT(closure_STATIC((StgClosure *)q));
3285 // otherwise, must be a pointer into the allocation space.
3288 (StgClosure *)*p = evacuate((StgClosure *)q);
      /*
       * Otherwise, q must be the info pointer of an activation
       * record.  All activation records have 'bitmap' style layout
       * info.
       */
3298 info = get_itbl((StgClosure *)p);
3300 switch (info->type) {
3302 // Dynamic bitmap: the mask is stored on the stack
3304 bitmap = ((StgRetDyn *)p)->liveness;
3305 p = (P_)&((StgRetDyn *)p)->payload[0];
3308 // probably a slow-entry point return address:
3316 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3317 old_p, p, old_p+1));
3319 p++; // what if FHS!=1 !? -- HWL
3324 /* Specialised code for update frames, since they're so common.
3325 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
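      /* Two variants follow: the first simply calls evacuate() on the
       * updatee; the #else branch open-codes the evacuation so that
       * blackholes are not promoted unnecessarily (see "Don't promote
       * blackholes" below).
       */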
3330 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3332 p += sizeofW(StgUpdateFrame);
3335 frame->updatee = evacuate(frame->updatee);
3337 #else // specialised code for update frames, not sure if it's worth it.
3339 nat type = get_itbl(frame->updatee)->type;
3341 if (type == EVACUATED) {
3342 frame->updatee = evacuate(frame->updatee);
3345 bdescr *bd = Bdescr((P_)frame->updatee);
3347 if (bd->gen_no > N) {
3348 if (bd->gen_no < evac_gen) {
3349 failed_to_evac = rtsTrue;
3354 // Don't promote blackholes
3356 if (!(stp->gen_no == 0 &&
3358 stp->no == stp->gen->n_steps-1)) {
3365 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3366 sizeofW(StgHeader), stp);
3367 frame->updatee = to;
3370 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3371 frame->updatee = to;
3372 recordMutable((StgMutClosure *)to);
3375 /* will never be SE_{,CAF_}BLACKHOLE, since we
3376 don't push an update frame for single-entry thunks. KSW 1999-01. */
3377 barf("scavenge_stack: UPDATE_FRAME updatee");
      // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3390 bitmap = info->layout.bitmap;
3392 // this assumes that the payload starts immediately after the info-ptr
3394 while (bitmap != 0) {
3395 if ((bitmap & 1) == 0) {
3396 (StgClosure *)*p = evacuate((StgClosure *)*p);
3399 bitmap = bitmap >> 1;
3406 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3411 StgLargeBitmap *large_bitmap;
3414 large_bitmap = info->layout.large_bitmap;
3417 for (i=0; i<large_bitmap->size; i++) {
3418 bitmap = large_bitmap->bitmap[i];
3419 q = p + BITS_IN(W_);
3420 while (bitmap != 0) {
3421 if ((bitmap & 1) == 0) {
3422 (StgClosure *)*p = evacuate((StgClosure *)*p);
3425 bitmap = bitmap >> 1;
3427 if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
3435 // and don't forget to follow the SRT
3440 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3445 /*-----------------------------------------------------------------------------
3446 scavenge the large object list.
3448 evac_gen set by caller; similar games played with evac_gen as with
3449 scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
3452 --------------------------------------------------------------------------- */
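/* A large object that fails to evacuate all of its pointees is
 * remembered by pushing a MUT_CONS for it onto its generation's
 * mutable list (see the scavenge_one() call below), mirroring what
 * the other scavenging routines do for ordinary closures.
 */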
3455 scavenge_large(step *stp)
3460 bd = stp->new_large_objects;
3462 for (; bd != NULL; bd = stp->new_large_objects) {
3464 /* take this object *off* the large objects list and put it on
3465 * the scavenged large objects list. This is so that we can
3466 * treat new_large_objects as a stack and push new objects on
3467 * the front when evacuating.
3469 stp->new_large_objects = bd->link;
3470 dbl_link_onto(bd, &stp->scavenged_large_objects);
3472 // update the block count in this step.
3473 stp->n_scavenged_large_blocks += bd->blocks;
3476 if (scavenge_one(p)) {
3477 mkMutCons((StgClosure *)p, stp->gen);
3482 /* -----------------------------------------------------------------------------
3483 Initialising the static object & mutable lists
3484 -------------------------------------------------------------------------- */
3487 zero_static_object_list(StgClosure* first_static)
3491 const StgInfoTable *info;
  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
3500 /* This function is only needed because we share the mutable link
3501 * field with the static link field in an IND_STATIC, so we have to
3502 * zero the mut_link field before doing a major GC, which needs the
3503 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
3510 zero_mutable_list( StgMutClosure *first )
3512 StgMutClosure *next, *c;
  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
3520 /* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
3529 for (c = (StgIndStatic *)caf_list; c != NULL;
3530 c = (StgIndStatic *)c->static_link)
3532 c->header.info = c->saved_info;
3533 c->saved_info = NULL;
3534 // could, but not necessary: c->static_link = NULL;
3540 markCAFs( evac_fn evac )
3544 for (c = (StgIndStatic *)caf_list; c != NULL;
3545 c = (StgIndStatic *)c->static_link)
3547 evac(&c->indirectee);
3551 /* -----------------------------------------------------------------------------
3552 Sanity code for CAF garbage collection.
3554 With DEBUG turned on, we manage a CAF list in addition to the SRT
3555 mechanism. After GC, we run down the CAF list and blackhole any
3556 CAFs which have been garbage collected. This means we get an error
3557 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
3561 -------------------------------------------------------------------------- */
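/* NB: the whole of this sanity code is compiled out (#if 0) even in
 * DEBUG builds; it is kept here for reference only.
 */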
3563 #if 0 && defined(DEBUG)
3570 const StgInfoTable *info;
3581 ASSERT(info->type == IND_STATIC);
3583 if (STATIC_LINK(info,p) == NULL) {
3584 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3586 SET_INFO(p,&stg_BLACKHOLE_info);
3587 p = STATIC_LINK2(info,p);
3591 pp = &STATIC_LINK2(info,p);
3598 // belch("%d CAFs live", i);
3603 /* -----------------------------------------------------------------------------
   Lazy black holing

   Whenever a thread returns to the scheduler after possibly doing
3607 some work, we have to run down the stack and black-hole all the
3608 closures referred to by update frames.
3609 -------------------------------------------------------------------------- */
3612 threadLazyBlackHole(StgTSO *tso)
3614 StgUpdateFrame *update_frame;
3615 StgBlockingQueue *bh;
3618 stack_end = &tso->stack[tso->stack_size];
3619 update_frame = tso->su;
3622 switch (get_itbl(update_frame)->type) {
3625 update_frame = ((StgCatchFrame *)update_frame)->link;
3629 bh = (StgBlockingQueue *)update_frame->updatee;
3631 /* if the thunk is already blackholed, it means we've also
3632 * already blackholed the rest of the thunks on this stack,
3633 * so we can stop early.
3635 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3636 * don't interfere with this optimisation.
3638 if (bh->header.info == &stg_BLACKHOLE_info) {
3642 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3643 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3644 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3645 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3649 // We pretend that bh is now dead.
3650 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
3652 SET_INFO(bh,&stg_BLACKHOLE_info);
3655 // We pretend that bh has just been created.
3656 LDV_recordCreate(bh);
3660 update_frame = update_frame->link;
3664 update_frame = ((StgSeqFrame *)update_frame)->link;
3670 barf("threadPaused");
3676 /* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
3680 * lazy black holing here.
3682 * -------------------------------------------------------------------------- */
3685 threadSqueezeStack(StgTSO *tso)
3687 lnat displacement = 0;
3688 StgUpdateFrame *frame;
3689 StgUpdateFrame *next_frame; // Temporally next
3690 StgUpdateFrame *prev_frame; // Temporally previous
3692 rtsBool prev_was_update_frame;
3694 StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
3697 void printObj( StgClosure *obj ); // from Printer.c
3699 top_frame = tso->su;
3702 bottom = &(tso->stack[tso->stack_size]);
3705 /* There must be at least one frame, namely the STOP_FRAME.
3707 ASSERT((P_)frame < bottom);
3709 /* Walk down the stack, reversing the links between frames so that
3710 * we can walk back up as we squeeze from the bottom. Note that
3711 * next_frame and prev_frame refer to next and previous as they were
3712 * added to the stack, rather than the way we see them in this
3713 * walk. (It makes the next loop less confusing.)
3715 * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
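  /* This is the classic pointer-reversal trick: after this downward
   * walk, each frame->link points towards the *younger* frame, and the
   * squeezing pass below restores the links one at a time
   * (frame->link = prev_frame) as it works its way back up.
   */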
3720 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3721 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3722 prev_frame = frame->link;
3723 frame->link = next_frame;
3728 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3729 printObj((StgClosure *)prev_frame);
3730 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3733 switch (get_itbl(frame)->type) {
3736 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3749 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3751 printObj((StgClosure *)prev_frame);
3754 if (get_itbl(frame)->type == UPDATE_FRAME
3755 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3760 /* Now, we're at the bottom. Frame points to the lowest update
3761 * frame on the stack, and its link actually points to the frame
3762 * above. We have to walk back up the stack, squeezing out empty
3763 * update frames and turning the pointers back around on the way
3766 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3767 * we never want to eliminate it anyway. Just walk one step up
3768 * before starting to squeeze. When you get to the topmost frame,
3769 * remember that there are still some words above it that might have
3776 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /* Loop through all of the frames (everything except the very
3780 * bottom). Things are complicated by the fact that we have
3781 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
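  /* Concretely: when the current and previous frames are both update
   * frames and nothing lies between them, the current frame is
   * squeezed out by overwriting its updatee with an indirection
   * (UPD_IND_NOLOCK below) to the previous frame's updatee;
   * displacement then grows by sizeofW(StgUpdateFrame), and the
   * intervening words are later slid over the dead frame.
   */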
3784 while (frame != NULL) {
3786 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3787 rtsBool is_update_frame;
3789 next_frame = frame->link;
3790 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
    /* Check to see if:
     * 1. both the previous and current frame are update frames
     * 2. the current frame is empty
     */
3796 if (prev_was_update_frame && is_update_frame &&
3797 (P_)prev_frame == frame_bottom + displacement) {
3799 // Now squeeze out the current frame
3800 StgClosure *updatee_keep = prev_frame->updatee;
3801 StgClosure *updatee_bypass = frame->updatee;
3804 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3808 /* Deal with blocking queues. If both updatees have blocked
3809 * threads, then we should merge the queues into the update
3810 * frame that we're keeping.
3812 * Alternatively, we could just wake them up: they'll just go
3813 * straight to sleep on the proper blackhole! This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
3817 #if 0 // do it properly...
3818 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3819 # error Unimplemented lazy BH warning. (KSW 1999-01)
3821 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3822 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3824 // Sigh. It has one. Don't lose those threads!
3825 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3826 // Urgh. Two queues. Merge them.
3827 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3829 while (keep_tso->link != END_TSO_QUEUE) {
3830 keep_tso = keep_tso->link;
3832 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3835 // For simplicity, just swap the BQ for the BH
3836 P_ temp = updatee_keep;
3838 updatee_keep = updatee_bypass;
3839 updatee_bypass = temp;
3841 // Record the swap in the kept frame (below)
3842 prev_frame->updatee = updatee_keep;
3847 TICK_UPD_SQUEEZED();
3848 /* wasn't there something about update squeezing and ticky to be
3849 * sorted out? oh yes: we aren't counting each enter properly
3850 * in this case. See the log somewhere. KSW 1999-04-21
3852 * Check two things: that the two update frames don't point to
3853 * the same object, and that the updatee_bypass isn't already an
3854 * indirection. Both of these cases only happen when we're in a
       * black-hole-style loop (and there are multiple update frames
       * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
3859 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3860 // this wakes the threads up
3861 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3864 sp = (P_)frame - 1; // sp = stuff to slide
3865 displacement += sizeofW(StgUpdateFrame);
3868 // No squeeze for this frame
3869 sp = frame_bottom - 1; // Keep the current frame
      /* Do lazy black-holing. */
3873 if (is_update_frame) {
3874 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3875 if (bh->header.info != &stg_BLACKHOLE_info &&
3876 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3877 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3878 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3879 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3882 /* zero out the slop so that the sanity checker can tell
	   * where the next closure is.
	   */
3886 StgInfoTable *info = get_itbl(bh);
3887 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3888 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3889 * info is used for a different purpose, and it's exactly the
	     * same size as a BLACKHOLE in any case.
	     */
3892 if (info->type != THUNK_SELECTOR) {
3893 for (i = np; i < np + nw; i++) {
3894 ((StgClosure *)bh)->payload[i] = 0;
3901 // We pretend that bh is now dead.
3902 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
3905 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
3907 SET_INFO(bh,&stg_BLACKHOLE_info);
3910 // We pretend that bh has just been created.
3911 LDV_recordCreate(bh);
3916 // Fix the link in the current frame (should point to the frame below)
3917 frame->link = prev_frame;
3918 prev_was_update_frame = is_update_frame;
3921 // Now slide all words from sp up to the next frame
3923 if (displacement > 0) {
3924 P_ next_frame_bottom;
3926 if (next_frame != NULL)
3927 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3929 next_frame_bottom = tso->sp - 1;
3933 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3937 while (sp >= next_frame_bottom) {
3938 sp[displacement] = *sp;
3942 (P_)prev_frame = (P_)frame + displacement;
3946 tso->sp += displacement;
3947 tso->su = prev_frame;
	   belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames",
3951 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3956 /* -----------------------------------------------------------------------------
3959 * We have to prepare for GC - this means doing lazy black holing
3960 * here. We also take the opportunity to do stack squeezing if it's
3962 * -------------------------------------------------------------------------- */
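/* Note that threadSqueezeStack() black-holes as it goes, so it
 * subsumes threadLazyBlackHole(); only one of the two is called.
 */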
3964 threadPaused(StgTSO *tso)
3966 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3967 threadSqueezeStack(tso); // does black holing too
3969 threadLazyBlackHole(tso);
3972 /* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
3978 printMutOnceList(generation *gen)
3980 StgMutClosure *p, *next;
3982 p = gen->mut_once_list;
3985 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3986 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3987 fprintf(stderr, "%p (%s), ",
3988 p, info_type((StgClosure *)p));
3990 fputc('\n', stderr);
3994 printMutableList(generation *gen)
3996 StgMutClosure *p, *next;
  p = gen->mut_list;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
4002 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4003 fprintf(stderr, "%p (%s), ",
4004 p, info_type((StgClosure *)p));
4006 fputc('\n', stderr);
4009 static inline rtsBool
4010 maybeLarge(StgClosure *closure)
4012 StgInfoTable *info = get_itbl(closure);
4014 /* closure types that may be found on the new_large_objects list;
4015 see scavenge_large */
4016 return (info->type == MUT_ARR_PTRS ||
4017 info->type == MUT_ARR_PTRS_FROZEN ||
4018 info->type == TSO ||
4019 info->type == ARR_WORDS);