/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.129 2001/11/28 15:42:05 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
#include "SchedAPI.h"        // for RevertCAFs prototype
#include "Sanity.h"
#include "BlockAlloc.h"
#include "ProfHeap.h"
#include "Weak.h"
#include "StablePriv.h"
#include "Prelude.h"
#include "ParTicky.h"        // ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#ifdef PROFILING
#include "RetainerProfile.h"
#include "LdvProfile.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;             // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
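
/* A minimal sketch (editor's illustration, not part of the collector)
 * of the list idiom described above: traversal stops at the '1'
 * sentinel (END_OF_STATIC_LIST) rather than at NULL, because a NULL
 * static link field means "not on the list".
 */
#if 0
static void example_walk_static_list( void )
{
    StgClosure *p;
    for (p = static_objects; p != END_OF_STATIC_LIST;
         p = STATIC_LINK(get_itbl(p), p)) {
        // ... scavenge p ...
    }
}
#endif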
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;     // also pending finaliser list
static rtsBool weak_done;       // all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          // blocks allocated during this GC
lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
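
// pop_mark_stack is the counterpart to push_mark_stack, as used by
// scavenge_mark_stack() below; a minimal sketch of the obvious
// implementation:
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}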
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
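
/* A condensed sketch of the phases implemented below (for orientation
 * only):
 *
 *   evacuate the roots;
 *   do { scavenge all pending to-space and mutable-list work; }
 *   while (traverse_weak_ptr_list() revived anything);
 *   free from-space; to-space becomes the new from-space.
 */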
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags        = BF_EVACUATED;  // it's a to-space block
      stp->hp          = bd->start;
      stp->hpLim       = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd       = bd;
      stp->to_blocks   = bd;
      stp->n_to_blocks = 1;
      stp->scan        = bd->start;
      stp->scan_bd     = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;
      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
          nat bitmap_size; // in bytes
          bdescr *bitmap_bdescr;
          StgWord *bitmap;

          bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

          if (bitmap_size > 0) {
              bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
                                         / BLOCK_SIZE);
              stp->bitmap = bitmap_bdescr;
              bitmap = bitmap_bdescr->start;

              IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
                                 bitmap_size, bitmap););

              // don't forget to fill it with zeros!
              memset(bitmap, 0, bitmap_size);

              // for each block in this step, point to its bitmap from the
              // block descriptor.
              for (bd=stp->blocks; bd != NULL; bd = bd->link) {
                  bd->u.bitmap = bitmap;
                  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
              }
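              // Worked example (editor's illustration): with 4Kbyte
              // blocks and 32-bit words, BLOCK_SIZE_W is 1024, so each
              // block needs 1024/32 = 32 words (128 bytes) of bitmap -
              // one mark bit per word of heap.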
          }
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below.
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = allocBlock();
          bd->gen_no = g;
          bd->step = stp;
          bd->link = NULL;
          bd->flags = 0;        // *not* a to-space block or a large object
          stp->hp = bd->start;
          stp->hpLim = stp->hp + BLOCK_SIZE_W;
          stp->hp_bd = bd;
          stp->blocks = bd;
          stp->n_blocks = 1;
          new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      long gen;
      int st;

    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
          scavenge_mark_stack();
          flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
            goto loop2;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
            goto loop2;
          }
        }
      }
    }

    if (flag) { goto loop; }

    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              stp->hp_bd->free = stp->hp;
              stp->hp_bd->link = NULL;
          }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
    LdvCensusForDead(N);
#endif

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp_bd->free;
        }
      }

      // for generations we collected...
      if (g <= N) {

        // rough calculation of garbage collected, for stats output
        if (stp->is_compacted) {
            collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
        } else {
            collected += stp->n_blocks * BLOCK_SIZE_W;
        }

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
            if (stp->is_compacted) {
                // for a compacted step, just shift the new to-space
                // onto the front of the now-compacted existing blocks.
                for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED;  // now from-space
                }
                // tack the new blocks on the end of the existing blocks
                if (stp->blocks == NULL) {
                    stp->blocks = stp->to_blocks;
                } else {
                    for (bd = stp->blocks; bd != NULL; bd = next) {
                        next = bd->link;
                        if (next == NULL) {
                            bd->link = stp->to_blocks;
                        }
                    }
                }
                // add the new blocks to the block tally
                stp->n_blocks += stp->n_to_blocks;
            } else {
                freeChain(stp->blocks);
                stp->blocks = stp->to_blocks;
                stp->n_blocks = stp->n_to_blocks;
                for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED;  // now from-space
                }
            }
            stp->to_blocks = NULL;
            stp->n_to_blocks = 0;
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on the
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
            next = bd->link;
            freeGroup(bd);
        }

        // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
        }
        stp->large_objects = stp->scavenged_large_objects;
        stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
        // for older generations...

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = stp->scavenged_large_objects; bd; bd = next) {
            next = bd->link;
            bd->flags &= ~BF_EVACUATED;
            dbl_link_onto(bd, &stp->large_objects);
        }

        // add the new blocks we promoted during this GC
        stp->n_blocks += stp->n_to_blocks;
        stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
//        fprintf(stderr,"compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        fprintf(stderr,"compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              heapOverflow();
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }
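
          // Worked example (editor's illustration): with gens == 2 and
          // no compaction, the constraint is size*2 + min_alloc <= max
          // (we need both from-space and to-space for the old
          // generation), so size = (max - min_alloc) / 2.  With
          // compaction the old generation needs no second copy, hence
          // the smaller "(gens - 1) * 2 - 1" divisor above.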
          if (size < live) {
              heapOverflow();
          }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
              min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->is_compacted && stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;    // now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2).
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
         blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded();    // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
        (100 + (long)g0s0_pcnt_kept);
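
      /* Worked example (editor's illustration): suggestion = 1000
       * blocks, needed = 200, g0s0_pcnt_kept = 30:
       *    blocks = (1000 - 200) * 100 / 130  ~=  615 blocks.
       */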
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);

    } else {
      // we might have added extra large blocks to the nursery, so
      // resize back to minAllocAreaSize again.
      resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);

  //PAR_TICKY_TP();
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
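
/* How the collector drives this to a fixed point (see the scavenge
 * loop in GarbageCollect above); a sketch for orientation only:
 *
 *   do { scavenge everything reachable so far; }
 *   while (traverse_weak_ptr_list());   // keys may keep becoming live
 */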
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      tmp = (StgTSO *)isAlive((StgClosure *)t);

      if (tmp != NULL) {
          t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
          next = t->link;
          *prev = next;
          continue;
      case ThreadKilled:
      case ThreadComplete:
          // finished or died.  The thread might still be alive, but we
          // don't keep it on the all_threads list.  Don't forget to
          // stub out its global_link field.
          next = t->global_link;
          t->global_link = END_TSO_QUEUE;
          break;
      default:
          ;
      }

      if (tmp == NULL) {
          // not alive (yet): leave this thread on the old_all_threads list.
          prev = &(t->global_link);
          next = t->global_link;
      }
      else {
          // alive: move this thread onto the all_threads list.
          next = t->global_link;
          t->global_link = all_threads;
          all_threads = t;
          *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        tmp = (StgTSO *)evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      w = (StgWeak *)evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
        return p;
    }
    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
        if (bd->flags & BF_EVACUATED) {
            return p;
        } else {
            return NULL;
        }
    }
    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies
  // size_to_reserve words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
         (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable* info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    // not a group head: find the group head
    if (bd->blocks == 0) { bd = bd->link; }

    if (bd->gen_no > N) {
        /* Can't evacuate this object, because it's in a generation
         * older than the ones we're collecting.  Let's hope that it's
         * in evac_gen or older, or we will have to arrange to track
         * this pointer using the mutable list.
         */
        if (bd->gen_no < evac_gen) {
            // nope
            failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
        info = get_itbl(q);
        if (info->type == TSO &&
            ((StgTSO *)q)->what_next == ThreadRelocated) {
            q = (StgClosure *)((StgTSO *)q)->link;
            goto loop;
        }
        evacuate_large((P_)q);
        return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            if (mark_stack_full()) {
                mark_stack_overflowed = rtsTrue;
                reset_mark_stack();
            }
            push_mark_stack((P_)q);
        }
        return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:               // here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
        case CONSTR_NOCAF_STATIC:
          {
            StgWord offset = info->layout.selector_offset;

            // check that the size is in range
            ASSERT(offset <
                   (StgWord32)(selectee_info->layout.payload.ptrs +
                               selectee_info->layout.payload.nptrs));

            // perform the selection!
            q = selectee->payload[offset];

            /* if we're already in to-space, there's no need to continue
             * with the evacuation, just update the source address with
             * a pointer to the (evacuated) constructor field.
             */
            if (HEAP_ALLOCED(q)) {
              bdescr *bd = Bdescr((P_)q);
              if (bd->flags & BF_EVACUATED) {
                if (bd->gen_no < evac_gen) {
                  failed_to_evac = rtsTrue;
                  TICK_GC_FAILED_PROMOTION();
                }
                return q;
              }
            }

            /* otherwise, carry on and evacuate this constructor field,
             * (but not the constructor itself)
             */
            goto loop;
          }

        case IND:
        case IND_STATIC:
        case IND_PERM:
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
          selectee = ((StgInd *)selectee)->indirectee;
          goto selector_loop;

        case EVACUATED:
          selectee = ((StgEvacuated *)selectee)->evacuee;
          goto selector_loop;

        case THUNK_SELECTOR:
#if 0
          /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
             something) to go into an infinite loop when the nightly
             stage2 compiles PrelTup.lhs. */

          /* we can't recurse indefinitely in evacuate(), so set a
           * limit on the number of times we can go around this
           * loop.
           */
          if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
              bd = Bdescr((P_)selectee);
              if (!(bd->flags & BF_EVACUATED)) {
                  thunk_selector_depth++;
                  selectee = evacuate(selectee);
                  thunk_selector_depth--;
                  goto selector_loop;
              }
          }
#endif
          // otherwise, fall through...

        case AP_UPD:
        case THUNK:
        case THUNK_1_0: case THUNK_0_1:
        case THUNK_2_0: case THUNK_1_1: case THUNK_0_2:
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case BLACKHOLE_BQ:
          // not evaluated yet
          break;

#if defined(PAR)
          // a copy of the top-level cases below
        case RBH: // cf. BLACKHOLE_BQ
          {
            //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
            to = copy(q,BLACKHOLE_sizeW(),stp);
            //ToDo: derive size etc from reverted IP
            //to = copy(q,size,stp);
            // recordMutable((StgMutClosure *)to);
            return to;
          }

        case BLOCKED_FETCH:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
          to = copy(q,sizeofW(StgBlockedFetch),stp);
          return to;

# ifdef DIST
        case REMOTE_REF:
# endif
        case FETCH_ME:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
          to = copy(q,sizeofW(StgFetchMe),stp);
          return to;

        case FETCH_ME_BQ:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
          to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
          return to;
#endif

        default:
          barf("evacuate: THUNK_SELECTOR: strange selectee %d",
               (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
          && ((StgIndStatic *)q)->saved_info == NULL
          && IND_STATIC_LINK((StgClosure *)q) == NULL) {
        IND_STATIC_LINK((StgClosure *)q) = static_objects;
        static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO: case RET_SMALL: case RET_VEC_SMALL:
  case RET_BIG: case RET_VEC_BIG: case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
        move_TSO(tso, new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */

static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
         || tso->why_blocked == BlockedOnException
#if defined(PAR)
         || tso->why_blocked == BlockedOnGA
         || tso->why_blocked == BlockedOnGA_NoSend
#endif
         ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {
    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
        /* treat MVars specially, because we don't want to evacuate the
         * mut_link field in the middle of the closure.
         */
    {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
        mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
        mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)mvar);
        failed_to_evac = rtsFalse; // mutable.
        p += sizeofW(StgMVar);
        break;
    }

    case THUNK_2_0: case FUN_2_0:
        scavenge_srt(info);
    case CONSTR_2_0:
        ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_srt(info);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
        break;

    case FUN_1_0:
        scavenge_srt(info);
    case CONSTR_1_0:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_srt(info);
        p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
        break;

    case FUN_0_1:
        scavenge_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case FUN_0_2: case THUNK_0_2:
        scavenge_srt(info);
        p += sizeofW(StgHeader) + 2;
        break;

    case FUN_1_1: case THUNK_1_1:
        scavenge_srt(info);
    case CONSTR_1_1:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case FUN:
    case THUNK:
        scavenge_srt(info);
        // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            *p = (StgWord)evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    case IND_PERM:
        if (stp->gen->no != 0) {
#ifdef PROFILING
            // @LDV profiling
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            //
            // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
            //
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
            // @LDV profiling
            // We pretend that p has just been created.
            LDV_recordCreate((StgClosure *)p);
#endif
        }
        // fall through
    case IND_OLDGEN_PERM:
        ((StgIndOldGen *)p)->indirectee =
            evacuate(((StgIndOldGen *)p)->indirectee);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordOldToNewPtrs((StgMutClosure *)p);
        }
        p += sizeofW(StgIndOldGen);
        break;
    case MUT_VAR:
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)p);
        failed_to_evac = rtsFalse; // mutable anyhow
        p += sizeofW(StgMutVar);
        break;

    case MUT_CONS:
        // ignore these
        failed_to_evac = rtsFalse; // mutable anyhow
        p += sizeofW(StgMutVar);
        break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;

    case BLACKHOLE_BQ:
    {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        bh->blocking_queue =
            (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
        recordMutable((StgMutClosure *)bh);
        failed_to_evac = rtsFalse;
        p += BLACKHOLE_sizeW();
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }

    case AP_UPD: // same as PAPs
    case PAP:
        /* Treat a PAP just like a section of stack, not forgetting to
         * evacuate the function pointer too...
         */
    {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
    }
    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;

    case MUT_ARR_PTRS:
        // follow everything
    {
        StgPtr next;

        evac_gen = 0;           // repeatedly mutable
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)q);
        failed_to_evac = rtsFalse; // mutable anyhow.
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)evacuate((StgClosure *)*p);
        }
        // it's tempting to recordMutable() if failed_to_evac is
        // false, but that breaks some assumptions (eg. every
        // closure on the mutable list is supposed to have the MUT
        // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)tso);
        failed_to_evac = rtsFalse; // mutable anyhow.
        p += tso_sizeW(tso);
        break;
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
        nat size, ptrs, nonptrs, vhs;
        char str[80];
        StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

        StgRBH *rbh = (StgRBH *)p;
        rbh->blocking_queue =
            (StgBlockingQueueElement *)evacuate((StgClosure *)rbh->blocking_queue);
        recordMutable((StgMutClosure *)rbh);
        failed_to_evac = rtsFalse;  // mutable anyhow.
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
    }

    case BLOCKED_FETCH:
    {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        // follow the pointer to the node which is being demanded
        bf->node = evacuate((StgClosure *)bf->node);
        // follow the link to the rest of the blocking queue
        bf->link = (StgBlockingQueueElement *)evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
    }

#ifdef DIST
    case REMOTE_REF:
#endif
    case FETCH_ME:
        p += sizeofW(StgFetchMe);
        break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        fmbq->blocking_queue =
            (StgBlockingQueueElement *)evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
    }
#endif
    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
2422 Scavenge everything on the mark stack.
2424 This is slightly different from scavenge():
2425 - we don't walk linearly through the objects, so the scavenger
2426 doesn't need to advance the pointer on to the next object.
2427 -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
	q = p;
	switch (info->type) {

	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable.
	    break;
	}
	case FUN_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;
	case FUN:
	case THUNK:
	case CONSTR:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;
	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	    break;

	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;
	    break;
	}
	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}
	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}
	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable anyhow.
	    break;
	}
	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    evac_gen = 0;		// repeatedly mutable
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
	case RBH: // cf. BLACKHOLE_BQ
	{
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    recordMutable((StgMutClosure *)rbh);
	    failed_to_evac = rtsFalse;	// mutable anyhow.
	    IF_DEBUG(gc,
		     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			   p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
			   bf, info_type((StgClosure *)bf),
			   bf->node, info_type(bf->node)));
	    break;
	}
	case FETCH_ME:
	    break; // nothing to do in this case

	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s) exciting, isn't it",
			   p, info_type((StgClosure *)p)));
	    break;
	}
	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);
	}

	// mark the next bit to indicate "scavenged"
	mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
	IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
	mark_stack_overflowed = rtsFalse;
	oldgen_scan_bd = oldest_gen->steps[0].blocks;
	oldgen_scan = oldgen_scan_bd->start;
    }
    if (oldgen_scan_bd) {
	// push a new thing on the mark stack
    loop:
	// find a closure that is marked but not scavenged, and start
	// from there.
	while (oldgen_scan < oldgen_scan_bd->free
	       && !is_marked(oldgen_scan,oldgen_scan_bd)) {
	    oldgen_scan++;
	}

	if (oldgen_scan < oldgen_scan_bd->free) {

	    // already scavenged?
	    if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
		oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
		goto loop;
	    }
	    push_mark_stack(oldgen_scan);
	    // ToDo: bump the linear scan by the actual size of the object
	    oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
	    goto linear_scan;
	}

	oldgen_scan_bd = oldgen_scan_bd->link;
	if (oldgen_scan_bd != NULL) {
	    oldgen_scan = oldgen_scan_bd->start;
	    goto loop;
	}
    }
}
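/* Editorial sketch of the mark-bit convention used above (assumption:
 * mark()/is_marked() consult a per-block bitmap reached via the block
 * descriptor).  Two bits describe each object in the compacted old
 * generation: the bit at the object's first word means "live", and the
 * bit at its second word means "already scavenged":
 */
#if 0
if (is_marked(q, bd) && !is_marked(q+1, bd)) {
    push_mark_stack(q);		// live but not yet scavenged: revisit it
}
#endif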
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
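/* A sketch of the caller's contract (it mirrors the code in
 * scavenge_large() below): scavenge_one() returns rtsTrue if it failed
 * to promote everything the object points to, in which case the object
 * must stay on a mutable list so the next GC revisits it.
 */
#if 0
if (scavenge_one(p)) {
    mkMutCons((StgClosure *)p, stp->gen);	// keep p in the remembered set
}
#endif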
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    case FUN_1_0:			// hardly worth specialising these guys
    case IND_OLDGEN_PERM:
    {
	StgPtr q, end;

	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
    }
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	break;

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }
	// nothing to follow
	break;

    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	recordMutable((StgMutClosure *)p);
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;		// repeatedly mutable
	scavengeTSO(tso);
	recordMutable((StgMutClosure *)tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }
    case AP_UPD:
    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	break;
    }
    case IND_OLDGEN:
	// This might happen if for instance a MUT_CONS was pointing to a
	// THUNK which has since been updated.  The IND_OLDGEN will
	// be on the mutable list anyway, so we don't need to do anything
	// here.
	break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
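/* The walk both functions below share, in miniature (editorial sketch;
 * "gen" stands for the generation being processed).  The lists are
 * terminated by END_MUT_LIST and linked through mut_link; "next" is
 * read eagerly because scavenging may re-link p onto another list.
 */
#if 0
StgMutClosure *p, *next;
for (p = gen->mut_list, next = p->mut_link;
     p != END_MUT_LIST;
     p = next, next = p->mut_link) {
    // ... scavenge p, possibly putting it back on a mutable list ...
}
#endif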
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);

	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
	switch(info->type) {

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	     * promoted
	     */
	    {
		StgPtr start = gen->steps[0].scan;
		bdescr *start_bd = gen->steps[0].scan_bd;
		nat size = 0;
		scavenge(&gen->steps[0]);
		if (start_bd != gen->steps[0].scan_bd) {
		    size += (P_)BLOCK_ROUND_UP(start) - start;
		    start_bd = start_bd->link;
		    while (start_bd != gen->steps[0].scan_bd) {
			size += BLOCK_SIZE_W;
			start_bd = start_bd->link;
		    }
		    size += gen->steps[0].scan -
			(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		} else {
		    size = gen->steps[0].scan - start;
		}
		belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
	    }
#endif

	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	     * in generation 1.
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    } else {
		/* the mut_link field of an IND_STATIC is overloaded as the
		 * static link field too (it just so happens that we don't need
		 * both at the same time), so we need to NULL it out when
		 * removing this object from the mutable list because the static
		 * link fields are all assumed to be NULL before doing a major
		 * collection.
		 */
		p->mut_link = NULL;
	    }
	    continue;
	case MUT_CONS:
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	     * points to.
	     */
	    if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.
		 */
		p->mut_link = new_list;
		new_list = p;
	    }
	    continue;

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
	}
    }

    gen->mut_once_list = new_list;
}
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);

	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
	switch(info->type) {

	case MUT_ARR_PTRS:
	    // follow everything
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    {
		StgPtr end, q;

		end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		continue;
	    }

	    // Happens if a MUT_ARR_PTRS in the old generation is frozen
	case MUT_ARR_PTRS_FROZEN:
	    {
		StgPtr end, q;

		evac_gen = gen->no;
		end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		evac_gen = 0;
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    mkMutCons((StgClosure *)p, gen);
		}
		continue;
	    }
	case MUT_VAR:
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;

	case MVAR:
	    {
		StgMVar *mvar = (StgMVar *)p;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		p->mut_link = gen->mut_list;
		gen->mut_list = p;
		continue;
	    }
	case TSO:
	    {
		StgTSO *tso = (StgTSO *)p;

		scavengeTSO(tso);

		/* Don't take this TSO off the mutable list - it might still
		 * point to some younger objects (because we set evac_gen to 0
		 * above).
		 */
		tso->mut_link = gen->mut_list;
		gen->mut_list = (StgMutClosure *)tso;
		continue;
	    }
	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		p->mut_link = gen->mut_list;
		gen->mut_list = p;
		continue;
	    }
	    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
	     */
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = gen->mut_once_list;
		gen->mut_once_list = p;
	    } else {
		p->mut_link = NULL;
	    }
	    continue;
#if defined(PAR)
	    // HWL: check whether all of these are necessary

	case RBH: // cf. BLACKHOLE_BQ
	    {
		// nat size, ptrs, nonptrs, vhs;
		// char str[80];
		// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

		StgRBH *rbh = (StgRBH *)p;
		(StgClosure *)rbh->blocking_queue =
		    evacuate((StgClosure *)rbh->blocking_queue);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)rbh);
		}
		// ToDo: use size of reverted closure here!
		p += BLACKHOLE_sizeW();
		break;
	    }
	case BLOCKED_FETCH:
	    {
		StgBlockedFetch *bf = (StgBlockedFetch *)p;
		// follow the pointer to the node which is being demanded
		(StgClosure *)bf->node =
		    evacuate((StgClosure *)bf->node);
		// follow the link to the rest of the blocking queue
		(StgClosure *)bf->link =
		    evacuate((StgClosure *)bf->link);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)bf);
		}
		p += sizeofW(StgBlockedFetch);
		break;
	    }

	case REMOTE_REF:
	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

	case FETCH_ME:
	    p += sizeofW(StgFetchMe);
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	    {
		StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
		(StgClosure *)fmbq->blocking_queue =
		    evacuate((StgClosure *)fmbq->blocking_queue);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)fmbq);
		}
		p += sizeofW(StgFetchMeBlockingQueue);
		break;
	    }
#endif // PAR

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
	}
    }
}
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
     * list... */
    while (p != END_OF_STATIC_LIST) {

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	 */
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;
	switch (info->type) {

	case IND_STATIC:
	    {
		StgInd *ind = (StgInd *)p;
		ind->indirectee = evacuate(ind->indirectee);

		/* might fail to evacuate it, in which case we have to pop it
		 * back on the mutable list (and take it off the
		 * scavenged_static list because the static link and mut link
		 * pointers are one and the same).
		 */
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    scavenged_static_objects = IND_STATIC_LINK(p);
		    ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		    oldest_gen->mut_once_list = (StgMutClosure *)ind;
		}
		break;
	    }
	case CONSTR_STATIC:
	    {
		StgPtr q, next;

		next = (P_)p->payload + info->layout.payload.ptrs;
		// evacuate the pointers
		for (q = (P_)p->payload; q < next; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		break;
	    }

	default:
	    barf("scavenge_static: strange closure %d", (int)(info->type));
	}
	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)
	 */
	p = static_objects;
    }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
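/* A note on the bitmap convention used below: bit i of an activation
 * record's bitmap covers payload word i, and a *clear* bit marks a
 * pointer that must be evacuated (set bits are non-pointers).  In
 * miniature:
 */
#if 0
while (bitmap != 0) {
    if ((bitmap & 1) == 0) {
	(StgClosure *)*p = evacuate((StgClosure *)*p);	// a pointer slot
    }
    p++;
    bitmap = bitmap >> 1;
}
#endif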
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */

    while (p < stack_end) {
	q = *(P_ *)p;

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {
	    p += ARG_SIZE(q);
	    p++; continue;
	}

	/* Is q a pointer to a closure?
	 */
	if (! LOOKS_LIKE_GHC_INFO(q) ) {
	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    }
	    // otherwise, must be a pointer into the allocation space.

	    (StgClosure *)*p = evacuate((StgClosure *)q);
	    p++;
	    continue;
	}
	/*
	 * Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	 * info.
	 */
	info = get_itbl((StgClosure *)p);

	switch (info->type) {

	    // Dynamic bitmap: the mask is stored on the stack
	case RET_DYN:
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    goto small_bitmap;

	    // probably a slow-entry point return address:
	case FUN:
	case FUN_STATIC:
	{
	    StgPtr old_p = p;	// saved for the debug message below
	    p++; // what if FHS!=1 !? -- HWL
	    IF_DEBUG(gran,
		     belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
			   old_p, p, old_p+1));
	    goto follow_srt;
	}
	    /* Specialised code for update frames, since they're so common.
	     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	     */
	case UPDATE_FRAME:
	    {
		StgUpdateFrame *frame = (StgUpdateFrame *)p;

		p += sizeofW(StgUpdateFrame);

#ifndef not_yet
		frame->updatee = evacuate(frame->updatee);
		continue;
#else // specialised code for update frames, not sure if it's worth it.
		StgClosure *to;
		nat type = get_itbl(frame->updatee)->type;

		if (type == EVACUATED) {
		    frame->updatee = evacuate(frame->updatee);
		    continue;
		} else {
		    bdescr *bd = Bdescr((P_)frame->updatee);
		    step *stp;
		    if (bd->gen_no > N) {
			if (bd->gen_no < evac_gen) {
			    failed_to_evac = rtsTrue;
			}
			continue;
		    }

		    // Don't promote blackholes
		    stp = bd->step;
		    if (!(stp->gen_no == 0 &&
			  stp->no != 0 &&
			  stp->no == stp->gen->n_steps-1)) {
			stp = stp->to;
		    }

		    switch (type) {
		    case BLACKHOLE:
		    case CAF_BLACKHOLE:
			to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				      sizeofW(StgHeader), stp);
			frame->updatee = to;
			continue;
		    case BLACKHOLE_BQ:
			to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
			frame->updatee = to;
			recordMutable((StgMutClosure *)to);
			continue;
		    default:
			/* will never be SE_{,CAF_}BLACKHOLE, since we
			   don't push an update frame for single-entry thunks.  KSW 1999-01. */
			barf("scavenge_stack: UPDATE_FRAME updatee");
		    }
		}
#endif
	    }
	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case STOP_FRAME:
	case CATCH_FRAME:
	case SEQ_FRAME:
	case RET_BCO:
	case RET_SMALL:
	case RET_VEC_SMALL:
	    bitmap = info->layout.bitmap;
	    p++;
	    // this assumes that the payload starts immediately after the info-ptr
	small_bitmap:
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		p++;
		bitmap = bitmap >> 1;
	    }

	follow_srt:
	    scavenge_srt(info);
	    continue;
	    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	case RET_BIG:
	case RET_VEC_BIG:
	    {
		StgPtr q;
		StgLargeBitmap *large_bitmap;
		nat i;

		large_bitmap = info->layout.large_bitmap;
		p++;

		for (i=0; i<large_bitmap->size; i++) {
		    bitmap = large_bitmap->bitmap[i];
		    q = p + BITS_IN(W_);
		    while (bitmap != 0) {
			if ((bitmap & 1) == 0) {
			    (StgClosure *)*p = evacuate((StgClosure *)*p);
			}
			p++;
			bitmap = bitmap >> 1;
		    }
		    if (i+1 < large_bitmap->size) {
			while (p < q) {
			    (StgClosure *)*p = evacuate((StgClosure *)*p);
			    p++;
			}
		    }
		}

		// and don't forget to follow the SRT
		goto follow_srt;
	    }
	default:
	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
	}
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
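/* List discipline in miniature (editorial sketch): evacuate() may push
 * freshly copied large objects onto stp->new_large_objects while we are
 * still scavenging, so the loop below re-reads the list head on every
 * iteration rather than caching an iterator.
 */
#if 0
while ((bd = stp->new_large_objects) != NULL) {
    stp->new_large_objects = bd->link;			// pop
    dbl_link_onto(bd, &stp->scavenged_large_objects);	// move to done-list
    // ... scavenge the object starting at bd->start ...
}
#endif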
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p;

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	 */
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	// update the block count in this step.
	stp->n_scavenged_large_blocks += bd->blocks;

	p = bd->start;
	if (scavenge_one(p)) {
	    mkMutCons((StgClosure *)p, stp->gen);
	}
    }
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	info = get_itbl(p);
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;
    }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */

static void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;
	c->mut_link = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    caf_list = NULL;
}

void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
      // black hole it
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  //  belch("%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
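/* The chain being walked, in miniature (editorial sketch; the real code
 * below also has to cast through the CATCH_FRAME and SEQ_FRAME link
 * fields): tso->su points at the topmost frame, and each frame's link
 * field points at the frame below it on the stack.
 */
#if 0
StgUpdateFrame *f;
for (f = tso->su; get_itbl(f)->type != STOP_FRAME; f = f->link) {
    // blackhole f->updatee when f is an UPDATE_FRAME
}
#endif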
static void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
	switch (get_itbl(update_frame)->type) {

	case CATCH_FRAME:
	    update_frame = ((StgCatchFrame *)update_frame)->link;
	    break;

	case UPDATE_FRAME:
	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	     */
	    if (bh->header.info == &stg_BLACKHOLE_info) {
		return;
	    }

	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef PROFILING
		// We pretend that bh is now dead.
		LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
#ifdef PROFILING
		// We pretend that bh has just been created.
		LDV_recordCreate(bh);
#endif
	    }

	    update_frame = update_frame->link;
	    break;

	case SEQ_FRAME:
	    update_frame = ((StgSeqFrame *)update_frame)->link;
	    break;

	case STOP_FRAME:
	    return;

	default:
	    barf("threadPaused");
	}
    }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
static void
threadSqueezeStack(StgTSO *tso)
{
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame;		// Temporally next
    StgUpdateFrame *prev_frame;		// Temporally previous
    StgPtr bottom;
    rtsBool prev_was_update_frame;
#if DEBUG
    StgUpdateFrame *top_frame;
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
	bhs=0, squeezes=0;
    void printObj( StgClosure *obj ); // from Printer.c

    top_frame = tso->su;
#endif

    bottom = &(tso->stack[tso->stack_size]);
    frame  = tso->su;

    /* There must be at least one frame, namely the STOP_FRAME.
     */
    ASSERT((P_)frame < bottom);
    /* Walk down the stack, reversing the links between frames so that
     * we can walk back up as we squeeze from the bottom.  Note that
     * next_frame and prev_frame refer to next and previous as they were
     * added to the stack, rather than the way we see them in this
     * walk. (It makes the next loop less confusing.)
     *
     * Stop if we find an update frame pointing to a black hole
     * (see comment in threadLazyBlackHole()).
     */
3720 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3721 prev_frame = frame->link;
3722 frame->link = next_frame;
3727 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3728 printObj((StgClosure *)prev_frame);
3729 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3732 switch (get_itbl(frame)->type) {
3735 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3748 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3750 printObj((StgClosure *)prev_frame);
3753 if (get_itbl(frame)->type == UPDATE_FRAME
3754 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above. We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     * back up.
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze. When you get to the topmost frame,
     * remember that there are still some words above it that might have
     * to be moved.
     */

    prev_frame = frame;
    frame = next_frame;

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
     */
    while (frame != NULL) {
	StgPtr sp;
	StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
	rtsBool is_update_frame;

	next_frame = frame->link;
	is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

	/* Check to see if
	 *   1. both the previous and current frame are update frames
	 *   2. the current frame is empty
	 */
	if (prev_was_update_frame && is_update_frame &&
	    (P_)prev_frame == frame_bottom + displacement) {
	    // Now squeeze out the current frame
	    StgClosure *updatee_keep   = prev_frame->updatee;
	    StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
	    IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
	    squeezes++;
#endif
	    /* Deal with blocking queues.  If both updatees have blocked
	     * threads, then we should merge the queues into the update
	     * frame that we're keeping.
	     *
	     * Alternatively, we could just wake them up: they'll just go
	     * straight to sleep on the proper blackhole!  This is less code
	     * and probably less bug prone, although it's probably much
	     * slower --SDM
	     */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
	    if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
		|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
		) {
		// Sigh.  It has one.  Don't lose those threads!
		if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
		    // Urgh.  Two queues.  Merge them.
		    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

		    while (keep_tso->link != END_TSO_QUEUE) {
			keep_tso = keep_tso->link;
		    }
		    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

		} else {
		    // For simplicity, just swap the BQ for the BH
		    P_ temp = updatee_keep;

		    updatee_keep = updatee_bypass;
		    updatee_bypass = temp;

		    // Record the swap in the kept frame (below)
		    prev_frame->updatee = updatee_keep;
		}
	    }
#endif
	    TICK_UPD_SQUEEZED();
	    /* wasn't there something about update squeezing and ticky to be
	     * sorted out?  oh yes: we aren't counting each enter properly
	     * in this case.  See the log somewhere.  KSW 1999-04-21
	     *
	     * Check two things: that the two update frames don't point to
	     * the same object, and that the updatee_bypass isn't already an
	     * indirection.  Both of these cases only happen when we're in a
	     * black hole-style loop (and there are multiple update frames
	     * on the stack pointing to the same closure), but they can both
	     * screw us up if we don't check.
	     */
	    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
		// this wakes the threads up
		UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
	    }

	    sp = (P_)frame - 1;	// sp = stuff to slide
	    displacement += sizeofW(StgUpdateFrame);

	} else {
	    // No squeeze for this frame
	    sp = frame_bottom - 1;	// Keep the current frame
	    /* Do lazy black-holing.
	     */
	    if (is_update_frame) {
		StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
		if (bh->header.info != &stg_BLACKHOLE_info &&
		    bh->header.info != &stg_BLACKHOLE_BQ_info &&
		    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		    belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef DEBUG
		    /* zero out the slop so that the sanity checker can tell
		     * where the next closure is.
		     */
		    {
			StgInfoTable *info = get_itbl(bh);
			nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
			/* don't zero out slop for a THUNK_SELECTOR, because its layout
			 * info is used for a different purpose, and it's exactly the
			 * same size as a BLACKHOLE in any case.
			 */
			if (info->type != THUNK_SELECTOR) {
			    for (i = np; i < np + nw; i++) {
				((StgClosure *)bh)->payload[i] = 0;
			    }
			}
		    }
#endif
#ifdef PROFILING
		    // We pretend that bh is now dead.
		    LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
		    // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
		    SET_INFO(bh,&stg_BLACKHOLE_info);
#ifdef PROFILING
		    // We pretend that bh has just been created.
		    LDV_recordCreate(bh);
#endif
		}
	    }
	// Fix the link in the current frame (should point to the frame below)
	frame->link = prev_frame;
	prev_was_update_frame = is_update_frame;

	// Now slide all words from sp up to the next frame

	if (displacement > 0) {
	    P_ next_frame_bottom;

	    if (next_frame != NULL)
		next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
	    else
		next_frame_bottom = tso->sp - 1;

#if 0
	    IF_DEBUG(gc,
		     belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
			   displacement))
#endif

	    while (sp >= next_frame_bottom) {
		sp[displacement] = *sp;
		sp -= 1;
	    }
	}
	(P_)prev_frame = (P_)frame + displacement;
	frame = next_frame;
    }

    tso->sp += displacement;
    tso->su = prev_frame;
#if DEBUG
    IF_DEBUG(gc,
	     belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso);	// does black holing too
    else
	threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);
}