/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.131 2002/03/07 17:53:05 keithw Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"
#include "LdvProfile.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
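
/* An illustrative sketch (not compiled): this is how evacuate() below
 * links a static closure onto static_objects through its static link
 * field.  The helper name is hypothetical, for exposition only; the
 * real code inlines this in the *_STATIC cases of evacuate().
 */
#if 0
static void
push_static_object( StgClosure *q )
{
    if (STATIC_LINK(get_itbl(q),q) == NULL) {   // link == NULL: not on a list yet
	STATIC_LINK(get_itbl(q),q) = static_objects;
	static_objects = q;                     // cons onto the front
    }
}
#endif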
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC
/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
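
/* Sketch of how these two are used in evacuate()'s THUNK_SELECTOR case
 * (see below): bound the recursion rather than risk running off the C
 * stack on a long chain of selector thunks.
 *
 *     if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
 *         thunk_selector_depth++;
 *         selectee = evacuate(selectee);
 *         thunk_selector_depth--;
 *     }
 */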
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */
#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: sched_mutex

   -------------------------------------------------------------------------- */
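
/* An illustrative outline of the above (a sketch, not the code itself;
 * the real loop below also interleaves weak pointers, large objects
 * and the mark stack):
 *
 *     evacuate all roots into to-space;
 *     do {
 *         scavenge to-space;            // may evacuate further objects
 *     } while (anything was evacuated);
 *     free from-space; from-space = to-space;
 */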
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgWord *bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
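
  /* Worked example for the bitmap sizing above (assuming 4Kbyte blocks
   * and 32-bit words): a block holds BLOCK_SIZE/sizeof(W_) = 1024
   * words, so it needs 1024 mark bits = 128 bytes of bitmap, and the
   * per-block bitmap pointer advances by
   * BLOCK_SIZE_W/(sizeof(W_)*BITS_PER_BYTE) = 1024/32 = 32 words.
   */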
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp  = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */
#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;
  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }
    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */
    // scavenge each step in generations 0..maxgen
    {
      long gen;
      int st;

      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	  scavenge_mark_stack();
	  flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	  }
	}
      }
    }

    if (flag) { goto loop; }
    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
    LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	    next = bd->link;
	    freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	    bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;
      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	    next = bd->link;
	    bd->flags &= ~BF_EVACUATED;
	    dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);
      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
	  (RtsFlags.GcFlags.compact ||
	   (max > 0 &&
	    oldest_gen->steps[0].n_blocks >
	    (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

	  // this test is necessary to ensure that the calculations
	  // below don't have any negative results - we're working
	  // with unsigned values here.
	  if (max < min_alloc) {
	      heapOverflow();
	  }

	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }

	  if (size < live) {
	      size = live;
	  }
      }
#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }
  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
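
    /* Worked example (under the assumptions above): with L blocks of
     * live data and a nursery of L blocks, a collection needs
     * L (nursery) + L (live from-space) + L (to-space) = 3L blocks;
     * shrinking the nursery moves the requirement towards the 2L
     * needed for the live data and its to-space alone.
     */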
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC
      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
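
      /* A sketch of where the formula comes from (under the
       * approximation above): we want
       *
       *     blocks + blocks * g0s0_pcnt_kept/100 + needed  =  suggested
       *
       * i.e. the allocation area, plus the fraction of it expected to
       * survive the next minor GC, plus the blocks needed for the other
       * steps, should fill the suggested heap.  Rearranging gives
       * blocks = (suggested - needed) / (1 + g0s0_pcnt_kept/100),
       * which is the expression above scaled by 100 to stay integral.
       */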
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);

    } else {
      // we might have added extra large blocks to the nursery, so
      // resize back to minAllocAreaSize again.
      resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif
#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();
  // let go of lock (so that it can be re-grabbed below).
  RELEASE_LOCK(&sched_mutex);

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  ACQUIRE_LOCK(&sched_mutex);
  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
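
/* Usage sketch: GarbageCollect() above calls this at the end of each
 * scavenging pass, e.g.
 *
 *     if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
 *         goto loop;
 *     }
 *
 * so keys that become reachable on one pass get their values and
 * finalizers evacuated on the next.
 */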
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }
  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;
    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default:
	  break;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads  = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }
    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }
    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
	if (bd->flags & BF_EVACUATED) {
	    return p;
	} else {
	    return NULL;
	}
    }
    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
	// follow indirections
	p = ((StgInd *)p)->indirectee;
	continue;

    case EVACUATED:
	// alive!
	return ((StgEvacuated *)p)->evacuee;

    case TSO:
	if (((StgTSO *)p)->what_next == ThreadRelocated) {
	    p = (StgClosure *)((StgTSO *)p)->link;
	    continue;
	}
	// otherwise, fall through to default

    default:
	// dead.
	return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(src, size_org);
#endif
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
  // words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	 (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    // not a group head: find the group head
    if (bd->blocks == 0) { bd = bd->link; }

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	// nope
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info -> type) {
  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
    return copy(q,sizeW_fromITBL(info),stp);

  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
	case CONSTR:
	case CONSTR_1_0:
	case CONSTR_0_1:
	case CONSTR_2_0:
	case CONSTR_1_1:
	case CONSTR_0_2:
	case CONSTR_STATIC:
	case CONSTR_NOCAF_STATIC:
	  {
	    StgWord offset = info->layout.selector_offset;

	    // check that the size is in range
	    ASSERT(offset <
		   (StgWord32)(selectee_info->layout.payload.ptrs +
			       selectee_info->layout.payload.nptrs));

	    // perform the selection!
	    q = selectee->payload[offset];
	    if (major_gc==rtsTrue) {TICK_GC_SEL_MAJOR();} else {TICK_GC_SEL_MINOR();}

	    /* if we're already in to-space, there's no need to continue
	     * with the evacuation, just update the source address with
	     * a pointer to the (evacuated) constructor field.
	     */
	    if (HEAP_ALLOCED(q)) {
	      bdescr *bd = Bdescr((P_)q);
	      if (bd->flags & BF_EVACUATED) {
		if (bd->gen_no < evac_gen) {
		  failed_to_evac = rtsTrue;
		  TICK_GC_FAILED_PROMOTION();
		}
		return q;
	      }
	    }

	    /* otherwise, carry on and evacuate this constructor field,
	     * (but not the constructor itself)
	     */
	    goto loop;
	  }

	case IND:
	case IND_STATIC:
	case IND_PERM:
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;

	case EVACUATED:
	  selectee = ((StgEvacuated *)selectee)->evacuee;
	  goto selector_loop;

	case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
	  // otherwise, fall through...
	  TICK_GC_SEL_ABANDONED();
	  // and fall through...
#endif

	case AP_UPD:
	case THUNK:
	case THUNK_1_0:
	case THUNK_0_1:
	case THUNK_2_0:
	case THUNK_1_1:
	case THUNK_0_2:
	case THUNK_STATIC:
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case BLACKHOLE_BQ:
	  // not evaluated yet
	  break;

#if defined(PAR)
	  // a copy of the top-level cases below
	case RBH: // cf. BLACKHOLE_BQ
	  {
	    //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	    to = copy(q,BLACKHOLE_sizeW(),stp);
	    //ToDo: derive size etc from reverted IP
	    //to = copy(q,size,stp);
	    // recordMutable((StgMutClosure *)to);
	    return to;
	  }

	case BLOCKED_FETCH:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	  to = copy(q,sizeofW(StgBlockedFetch),stp);
	  return to;

# ifdef DIST
	case REMOTE_REF:
# endif
	case FETCH_ME:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMe),stp);
	  return to;

	case FETCH_ME_BQ:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	  return to;
#endif

	default:
	  barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	       (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */
void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */
StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
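
/* Sketch of the save/restore pattern used repeatedly below for
 * mutable closures:
 *
 *     evac_gen = 0;                   // don't promote eagerly
 *     ... evacuate the fields ...
 *     evac_gen = saved_evac_gen;
 *     recordMutable((StgMutClosure *)p);
 *     failed_to_evac = rtsFalse;      // it stays on the mutable list anyway
 */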
static void
scavenge(step *stp)
{
  StgPtr p, q;
  StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
    }
    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
    case FUN_0_2:
	scavenge_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
    case FUN_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }
    case IND_PERM:
	if (stp->gen->no != 0) {
#ifdef PROFILING
	    // @LDV profiling
	    // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
	    // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
	    LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
	    //
	    // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
	    //
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
#ifdef PROFILING
	    // @LDV profiling
	    // We pretend that p has just been created.
	    LDV_recordCreate((StgClosure *)p);
#endif
	}
	// fall through

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;
    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;
    case BLACKHOLE_BQ:
    {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	    evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
    }
    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }
    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
    }
    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;
    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
    }
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
    }
    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }

#ifdef DIST
    case REMOTE_REF:
#endif
    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif
    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }
    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
	doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
2440 scavenge_mark_stack(void)
2446 evac_gen = oldest_gen->no;
2447 saved_evac_gen = evac_gen;
2450 while (!mark_stack_empty()) {
2451 p = pop_mark_stack();
2453 info = get_itbl((StgClosure *)p);
2454 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2457 switch (info->type) {
2460 /* treat MVars specially, because we don't want to evacuate the
2461 * mut_link field in the middle of the closure.
2464 StgMVar *mvar = ((StgMVar *)p);
2466 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2467 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2468 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2469 evac_gen = saved_evac_gen;
2470 failed_to_evac = rtsFalse; // mutable.
2478 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2479 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2489 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2514 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2515 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2516 (StgClosure *)*p = evacuate((StgClosure *)*p);
2522 // don't need to do anything here: the only possible case
2523 // is that we're in a 1-space compacting collector, with
2524 // no "old" generation.
2528 case IND_OLDGEN_PERM:
2529 ((StgIndOldGen *)p)->indirectee =
2530 evacuate(((StgIndOldGen *)p)->indirectee);
2531 if (failed_to_evac) {
2532 recordOldToNewPtrs((StgMutClosure *)p);
2534 failed_to_evac = rtsFalse;
2539 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2540 evac_gen = saved_evac_gen;
2541 failed_to_evac = rtsFalse;
2546 failed_to_evac = rtsFalse;
2550 case SE_CAF_BLACKHOLE:
2558 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2559 (StgClosure *)bh->blocking_queue =
2560 evacuate((StgClosure *)bh->blocking_queue);
2561 failed_to_evac = rtsFalse;
2565 case THUNK_SELECTOR:
2567 StgSelector *s = (StgSelector *)p;
2568 s->selectee = evacuate(s->selectee);
2572 case AP_UPD: // same as PAPs
2574 /* Treat a PAP just like a section of stack, not forgetting to
2575 * evacuate the function pointer too...
2578 StgPAP* pap = (StgPAP *)p;
2580 pap->fun = evacuate(pap->fun);
2581 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2586 // follow everything
2590 evac_gen = 0; // repeatedly mutable
2591 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2592 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2593 (StgClosure *)*p = evacuate((StgClosure *)*p);
2595 evac_gen = saved_evac_gen;
2596 failed_to_evac = rtsFalse; // mutable anyhow.
2600 case MUT_ARR_PTRS_FROZEN:
2601 // follow everything
2605 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2606 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2607 (StgClosure *)*p = evacuate((StgClosure *)*p);
2614 StgTSO *tso = (StgTSO *)p;
2615 evac_gen = 0;
2616 scavengeTSO(tso);
2617 evac_gen = saved_evac_gen;
2618 failed_to_evac = rtsFalse;
2623 case RBH: // cf. BLACKHOLE_BQ
2626 nat size, ptrs, nonptrs, vhs;
2627 char str[80];
2628 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2630 StgRBH *rbh = (StgRBH *)p;
2631 (StgClosure *)rbh->blocking_queue =
2632 evacuate((StgClosure *)rbh->blocking_queue);
2633 recordMutable((StgMutClosure *)rbh);
2634 failed_to_evac = rtsFalse; // mutable anyhow.
2636 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2637 p, info_type((StgClosure *)p), (StgClosure *)rbh->blocking_queue));
2643 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2644 // follow the pointer to the node which is being demanded
2645 (StgClosure *)bf->node =
2646 evacuate((StgClosure *)bf->node);
2647 // follow the link to the rest of the blocking queue
2648 (StgClosure *)bf->link =
2649 evacuate((StgClosure *)bf->link);
2650 if (failed_to_evac) {
2651 failed_to_evac = rtsFalse;
2652 recordMutable((StgMutClosure *)bf);
2655 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2656 bf, info_type((StgClosure *)bf),
2657 bf->node, info_type(bf->node)));
2665 break; // nothing to do in this case
2667 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2669 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2670 (StgClosure *)fmbq->blocking_queue =
2671 evacuate((StgClosure *)fmbq->blocking_queue);
2672 if (failed_to_evac) {
2673 failed_to_evac = rtsFalse;
2674 recordMutable((StgMutClosure *)fmbq);
2677 belch("@@ scavenge: %p (%s) exciting, isn't it",
2678 p, info_type((StgClosure *)p)));
2684 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2688 if (failed_to_evac) {
2689 failed_to_evac = rtsFalse;
2690 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2693 // mark the next bit to indicate "scavenged"
2694 mark(q+1, Bdescr(q));
2696 } // while (!mark_stack_empty())
2698 // start a new linear scan if the mark stack overflowed at some point
2699 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2700 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2701 mark_stack_overflowed = rtsFalse;
2702 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2703 oldgen_scan = oldgen_scan_bd->start;
2706 if (oldgen_scan_bd) {
2707 // push a new thing on the mark stack
2708 loop:
2709 // find a closure that is marked but not scavenged, and start
2710 // scavenging it.
2711 while (oldgen_scan < oldgen_scan_bd->free
2712 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2713 oldgen_scan++;
2716 if (oldgen_scan < oldgen_scan_bd->free) {
2718 // already scavenged?
2719 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2720 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2721 goto loop;
2723 push_mark_stack(oldgen_scan);
2724 // ToDo: bump the linear scan by the actual size of the object
2725 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2729 oldgen_scan_bd = oldgen_scan_bd->link;
2730 if (oldgen_scan_bd != NULL) {
2731 oldgen_scan = oldgen_scan_bd->start;
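/* Illustrative sketch (not from GC.c): the overflow strategy used above --
 * pushes dropped on a full mark stack are recovered later by a linear sweep
 * over objects that are marked but not yet scavenged. The names and types
 * (cell_t, heap, push_mark, drain) are hypothetical, and the "heap" is a
 * flat array rather than block groups; guarded with #if 0.
 */
#if 0
#include <stddef.h>

enum { STACK_MAX = 8, HEAP_MAX = 64 };

typedef struct { int marked, scavenged; } cell_t;
static cell_t heap[HEAP_MAX];

static size_t mark_stack[STACK_MAX];   /* holds heap indices */
static size_t mark_sp;
static int    overflowed;

static void push_mark(size_t i)
{
    if (mark_sp == STACK_MAX) { overflowed = 1; return; }  /* drop it */
    mark_stack[mark_sp++] = i;
}

static void drain(void)
{
    size_t scan;
    for (;;) {
        while (mark_sp > 0)                 /* the fast path */
            heap[mark_stack[--mark_sp]].scavenged = 1;
        if (!overflowed)
            return;                         /* nothing was dropped */
        /* Some pushes were dropped: sweep linearly, re-pushing anything
         * marked but not yet scavenged -- cf. the oldgen_scan loop. */
        overflowed = 0;
        for (scan = 0; scan < HEAP_MAX; scan++)
            if (heap[scan].marked && !heap[scan].scavenged)
                push_mark(scan);
    }
}
#endif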
2737 /* -----------------------------------------------------------------------------
2738 Scavenge one object.
2740 This is used for objects that are temporarily marked as mutable
2741 because they contain old-to-new generation pointers. Only certain
2742 objects can have this property.
2743 -------------------------------------------------------------------------- */
2745 static rtsBool
2746 scavenge_one(StgPtr p)
2748 const StgInfoTable *info;
2749 nat saved_evac_gen = evac_gen;
2750 rtsBool no_luck;
2752 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2753 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2755 info = get_itbl((StgClosure *)p);
2757 switch (info->type) {
2760 case FUN_1_0: // hardly worth specialising these guys
2780 case IND_OLDGEN_PERM:
2784 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2785 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2786 (StgClosure *)*q = evacuate((StgClosure *)*q);
2792 case SE_CAF_BLACKHOLE:
2797 case THUNK_SELECTOR:
2799 StgSelector *s = (StgSelector *)p;
2800 s->selectee = evacuate(s->selectee);
2805 // nothing to follow
2810 // follow everything
2813 evac_gen = 0; // repeatedly mutable
2814 recordMutable((StgMutClosure *)p);
2815 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2816 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2817 (StgClosure *)*p = evacuate((StgClosure *)*p);
2819 evac_gen = saved_evac_gen;
2820 failed_to_evac = rtsFalse;
2824 case MUT_ARR_PTRS_FROZEN:
2826 // follow everything
2829 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2830 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2831 (StgClosure *)*p = evacuate((StgClosure *)*p);
2838 StgTSO *tso = (StgTSO *)p;
2840 evac_gen = 0; // repeatedly mutable
2841 scavengeTSO(tso);
2842 recordMutable((StgMutClosure *)tso);
2843 evac_gen = saved_evac_gen;
2844 failed_to_evac = rtsFalse;
2851 StgPAP* pap = (StgPAP *)p;
2852 pap->fun = evacuate(pap->fun);
2853 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2858 // This might happen if, for instance, a MUT_CONS was pointing to a
2859 // THUNK which has since been updated. The IND_OLDGEN will
2860 // be on the mutable list anyway, so we don't need to do anything
2861 // here.
2865 barf("scavenge_one: strange object %d", (int)(info->type));
2868 no_luck = failed_to_evac;
2869 failed_to_evac = rtsFalse;
2870 return (no_luck);
2873 /* -----------------------------------------------------------------------------
2874 Scavenging mutable lists.
2876 We treat the mutable list of each generation > N (i.e. all the
2877 generations older than the one being collected) as roots. We also
2878 remove non-mutable objects from the mutable list at this point.
2879 -------------------------------------------------------------------------- */
2882 scavenge_mut_once_list(generation *gen)
2884 const StgInfoTable *info;
2885 StgMutClosure *p, *next, *new_list;
2887 p = gen->mut_once_list;
2888 new_list = END_MUT_LIST;
2889 next = p->mut_link;
2891 evac_gen = gen->no;
2892 failed_to_evac = rtsFalse;
2894 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2896 // make sure the info pointer is into text space
2897 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2898 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2901 info = get_itbl(p);
2902 if (info->type==RBH)
2903 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2905 switch(info->type) {
2908 case IND_OLDGEN_PERM:
2910 /* Try to pull the indirectee into this generation, so we can
2911 * remove the indirection from the mutable list.
2913 ((StgIndOldGen *)p)->indirectee =
2914 evacuate(((StgIndOldGen *)p)->indirectee);
2916 #if 0 && defined(DEBUG)
2917 if (RtsFlags.DebugFlags.gc)
2918 /* Debugging code to print out the size of the thing we just
2919 * promoted
2920 */
2922 StgPtr start = gen->steps[0].scan;
2923 bdescr *start_bd = gen->steps[0].scan_bd;
2925 scavenge(&gen->steps[0]);
2926 if (start_bd != gen->steps[0].scan_bd) {
2927 size += (P_)BLOCK_ROUND_UP(start) - start;
2928 start_bd = start_bd->link;
2929 while (start_bd != gen->steps[0].scan_bd) {
2930 size += BLOCK_SIZE_W;
2931 start_bd = start_bd->link;
2933 size += gen->steps[0].scan -
2934 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2936 size = gen->steps[0].scan - start;
2938 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2942 /* failed_to_evac might happen if we've got more than two
2943 * generations, we're collecting only generation 0, the
2944 * indirection resides in generation 2 and the indirectee is
2945 * in generation 1.
2947 if (failed_to_evac) {
2948 failed_to_evac = rtsFalse;
2949 p->mut_link = new_list;
2950 new_list = p;
2951 } else {
2952 /* the mut_link field of an IND_STATIC is overloaded as the
2953 * static link field too (it just so happens that we don't need
2954 * both at the same time), so we need to NULL it out when
2955 * removing this object from the mutable list because the static
2956 * link fields are all assumed to be NULL before doing a major
2957 * collection.
2959 p->mut_link = NULL;
2964 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2965 * it from the mutable list if possible by promoting whatever it
2966 * points to.
2968 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2969 /* didn't manage to promote everything, so put the
2970 * MUT_CONS back on the list.
2972 p->mut_link = new_list;
2973 new_list = p;
2978 // shouldn't have anything else on the mutables list
2979 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2983 gen->mut_once_list = new_list;
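/* Illustrative sketch (not from GC.c): the list surgery performed by
 * scavenge_mut_once_list() above -- walk a remembered list and keep only
 * the entries that still hold old-to-new pointers, consing survivors onto
 * a fresh list. node_t and points_young are hypothetical stand-ins;
 * guarded with #if 0.
 */
#if 0
#include <stddef.h>

typedef struct node {
    struct node *mut_link;      /* cf. StgMutClosure's mut_link field */
    int          points_young;  /* still holds an old-to-new pointer? */
} node_t;

/* Returns the new head of the filtered list. */
static node_t *filter_mut_once(node_t *p)
{
    node_t *next, *new_list = NULL;
    for (; p != NULL; p = next) {
        next = p->mut_link;
        if (p->points_young) {       /* cf. the failed_to_evac test */
            p->mut_link = new_list;  /* keep: cons onto the new list */
            new_list = p;
        } else {
            p->mut_link = NULL;      /* cf. NULLing the static link */
        }
    }
    return new_list;
}
#endif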
2988 scavenge_mutable_list(generation *gen)
2990 const StgInfoTable *info;
2991 StgMutClosure *p, *next;
2993 p = gen->saved_mut_list;
2994 next = p->mut_link;
2996 evac_gen = 0;
2997 failed_to_evac = rtsFalse;
2999 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3001 // make sure the info pointer is into text space
3002 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3003 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3006 info = get_itbl(p);
3007 if (info->type==RBH)
3008 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3010 switch(info->type) {
3013 // follow everything
3014 p->mut_link = gen->mut_list;
3015 gen->mut_list = p;
3019 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3020 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3021 (StgClosure *)*q = evacuate((StgClosure *)*q);
3026 // Happens if a MUT_ARR_PTRS in the old generation is frozen
3027 case MUT_ARR_PTRS_FROZEN:
3032 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3033 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
3034 (StgClosure *)*q = evacuate((StgClosure *)*q);
3038 if (failed_to_evac) {
3039 failed_to_evac = rtsFalse;
3040 mkMutCons((StgClosure *)p, gen);
3046 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3047 p->mut_link = gen->mut_list;
3048 gen->mut_list = p;
3053 StgMVar *mvar = (StgMVar *)p;
3054 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
3055 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
3056 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
3057 p->mut_link = gen->mut_list;
3058 gen->mut_list = p;
3064 StgTSO *tso = (StgTSO *)p;
3066 scavengeTSO(tso);
3068 /* Don't take this TSO off the mutable list - it might still
3069 * point to some younger objects (because we set evac_gen to 0
3070 * above).
3072 tso->mut_link = gen->mut_list;
3073 gen->mut_list = (StgMutClosure *)tso;
3079 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3080 (StgClosure *)bh->blocking_queue =
3081 evacuate((StgClosure *)bh->blocking_queue);
3082 p->mut_link = gen->mut_list;
3083 gen->mut_list = p;
3087 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3089 case IND_OLDGEN:
3090 case IND_OLDGEN_PERM:
3091 /* Try to pull the indirectee into this generation, so we can
3092 * remove the indirection from the mutable list.
3095 ((StgIndOldGen *)p)->indirectee =
3096 evacuate(((StgIndOldGen *)p)->indirectee);
3099 if (failed_to_evac) {
3100 failed_to_evac = rtsFalse;
3101 p->mut_link = gen->mut_once_list;
3102 gen->mut_once_list = p;
3109 // HWL: check whether all of these are necessary
3111 case RBH: // cf. BLACKHOLE_BQ
3113 // nat size, ptrs, nonptrs, vhs;
3115 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3116 StgRBH *rbh = (StgRBH *)p;
3117 (StgClosure *)rbh->blocking_queue =
3118 evacuate((StgClosure *)rbh->blocking_queue);
3119 if (failed_to_evac) {
3120 failed_to_evac = rtsFalse;
3121 recordMutable((StgMutClosure *)rbh);
3123 // ToDo: use size of reverted closure here!
3124 p += BLACKHOLE_sizeW();
3130 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3131 // follow the pointer to the node which is being demanded
3132 (StgClosure *)bf->node =
3133 evacuate((StgClosure *)bf->node);
3134 // follow the link to the rest of the blocking queue
3135 (StgClosure *)bf->link =
3136 evacuate((StgClosure *)bf->link);
3137 if (failed_to_evac) {
3138 failed_to_evac = rtsFalse;
3139 recordMutable((StgMutClosure *)bf);
3141 p += sizeofW(StgBlockedFetch);
3147 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3150 p += sizeofW(StgFetchMe);
3151 break; // nothing to do in this case
3153 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3155 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3156 (StgClosure *)fmbq->blocking_queue =
3157 evacuate((StgClosure *)fmbq->blocking_queue);
3158 if (failed_to_evac) {
3159 failed_to_evac = rtsFalse;
3160 recordMutable((StgMutClosure *)fmbq);
3162 p += sizeofW(StgFetchMeBlockingQueue);
3168 // shouldn't have anything else on the mutables list
3169 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3176 scavenge_static(void)
3178 StgClosure* p = static_objects;
3179 const StgInfoTable *info;
3181 /* Always evacuate straight to the oldest generation for static
3182 * objects */
3183 evac_gen = oldest_gen->no;
3185 /* keep going until we've scavenged all the objects on the linked
3186 * list... */
3187 while (p != END_OF_STATIC_LIST) {
3189 info = get_itbl(p);
3191 if (info->type==RBH)
3192 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3194 // make sure the info pointer is into text space
3195 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3196 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3198 /* Take this object *off* the static_objects list,
3199 * and put it on the scavenged_static_objects list.
3201 static_objects = STATIC_LINK(info,p);
3202 STATIC_LINK(info,p) = scavenged_static_objects;
3203 scavenged_static_objects = p;
3205 switch (info -> type) {
3209 StgInd *ind = (StgInd *)p;
3210 ind->indirectee = evacuate(ind->indirectee);
3212 /* might fail to evacuate it, in which case we have to pop it
3213 * back on the mutable list (and take it off the
3214 * scavenged_static list because the static link and mut link
3215 * pointers are one and the same).
3217 if (failed_to_evac) {
3218 failed_to_evac = rtsFalse;
3219 scavenged_static_objects = IND_STATIC_LINK(p);
3220 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3221 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3235 next = (P_)p->payload + info->layout.payload.ptrs;
3236 // evacuate the pointers
3237 for (q = (P_)p->payload; q < next; q++) {
3238 (StgClosure *)*q = evacuate((StgClosure *)*q);
3244 barf("scavenge_static: strange closure %d", (int)(info->type));
3247 ASSERT(failed_to_evac == rtsFalse);
3249 /* get the next static object from the list. Remember, there might
3250 * be more stuff on this list now that we've done some evacuating!
3251 * (static_objects is a global)
3253 p = static_objects;
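/* Illustrative sketch (not from GC.c): the work-list discipline used by
 * scavenge_static() above -- each object moves from the todo list to the
 * done list *before* it is scanned, so objects discovered mid-scan simply
 * extend the todo list, and the loop re-reads the list head each time.
 * sobj_t and its fields are hypothetical; guarded with #if 0.
 */
#if 0
#include <stddef.h>

typedef struct sobj {
    struct sobj *static_link;  /* doubles as the work-list link */
    struct sobj *ref;          /* a single outgoing reference */
    int          on_list;      /* breaks cycles, cf. the non-zero marker */
} sobj_t;

static sobj_t *todo_list;  /* cf. static_objects */
static sobj_t *done_list;  /* cf. scavenged_static_objects */

static void push_static(sobj_t *o)
{
    if (o != NULL && !o->on_list) {
        o->on_list = 1;
        o->static_link = todo_list;
        todo_list = o;
    }
}

static void scan_statics(void)
{
    while (todo_list != NULL) {
        sobj_t *p = todo_list;
        todo_list = p->static_link;  /* off the todo list... */
        p->static_link = done_list;  /* ...and onto the done list */
        done_list = p;
        push_static(p->ref);         /* may grow todo_list */
    }
}
#endif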
3257 /* -----------------------------------------------------------------------------
3258 scavenge_stack walks over a section of stack and evacuates all the
3259 objects pointed to by it. We can use the same code for walking
3260 PAPs, since these are just sections of copied stack.
3261 -------------------------------------------------------------------------- */
3264 scavenge_stack(StgPtr p, StgPtr stack_end)
3267 const StgInfoTable* info;
3270 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3273 * Each time around this loop, we are looking at a chunk of stack
3274 * that starts with either a pending argument section or an
3275 * activation record.
3278 while (p < stack_end) {
3279 q = *(P_ *)p;
3281 // If we've got a tag, skip over that many words on the stack
3282 if (IS_ARG_TAG((W_)q)) {
3283 p += ARG_SIZE(q);
3284 p++; continue;
3287 /* Is q a pointer to a closure?
3289 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3291 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3292 ASSERT(closure_STATIC((StgClosure *)q));
3294 // otherwise, must be a pointer into the allocation space.
3297 (StgClosure *)*p = evacuate((StgClosure *)q);
3303 * Otherwise, q must be the info pointer of an activation
3304 * record. All activation records have 'bitmap' style layout
3305 * info.
3307 info = get_itbl((StgClosure *)p);
3309 switch (info->type) {
3311 // Dynamic bitmap: the mask is stored on the stack
3313 bitmap = ((StgRetDyn *)p)->liveness;
3314 p = (P_)&((StgRetDyn *)p)->payload[0];
3315 goto small_bitmap;
3317 // probably a slow-entry point return address:
3325 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3326 old_p, p, old_p+1));
3328 p++; // what if FHS!=1 !? -- HWL
3330 goto follow_srt;
3333 /* Specialised code for update frames, since they're so common.
3334 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3335 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3339 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3341 p += sizeofW(StgUpdateFrame);
3343 #ifndef PAR
3344 frame->updatee = evacuate(frame->updatee);
3345 continue;
3346 #else // specialised code for update frames, not sure if it's worth it.
3348 nat type = get_itbl(frame->updatee)->type;
3350 if (type == EVACUATED) {
3351 frame->updatee = evacuate(frame->updatee);
3354 bdescr *bd = Bdescr((P_)frame->updatee);
3356 if (bd->gen_no > N) {
3357 if (bd->gen_no < evac_gen) {
3358 failed_to_evac = rtsTrue;
3363 // Don't promote blackholes
3365 if (!(stp->gen_no == 0 &&
3367 stp->no == stp->gen->n_steps-1)) {
3374 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3375 sizeofW(StgHeader), stp);
3376 frame->updatee = to;
3379 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3380 frame->updatee = to;
3381 recordMutable((StgMutClosure *)to);
3384 /* will never be SE_{,CAF_}BLACKHOLE, since we
3385 don't push an update frame for single-entry thunks. KSW 1999-01. */
3386 barf("scavenge_stack: UPDATE_FRAME updatee");
3392 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3399 bitmap = info->layout.bitmap;
3401 // this assumes that the payload starts immediately after the info-ptr
3402 small_bitmap:
3403 while (bitmap != 0) {
3404 if ((bitmap & 1) == 0) {
3405 (StgClosure *)*p = evacuate((StgClosure *)*p);
3407 p++;
3408 bitmap = bitmap >> 1;
3415 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3420 StgLargeBitmap *large_bitmap;
3423 large_bitmap = info->layout.large_bitmap;
3426 for (i=0; i<large_bitmap->size; i++) {
3427 bitmap = large_bitmap->bitmap[i];
3428 q = p + BITS_IN(W_);
3429 while (bitmap != 0) {
3430 if ((bitmap & 1) == 0) {
3431 (StgClosure *)*p = evacuate((StgClosure *)*p);
3433 p++;
3434 bitmap = bitmap >> 1;
3436 if (i+1 < large_bitmap->size) {
3437 while (p < q) {
3438 (StgClosure *)*p = evacuate((StgClosure *)*p);
3439 p++;
3444 // and don't forget to follow the SRT
3445 follow_srt:
3446 scavenge_srt(info);
3447 continue;
3449 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
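/* Illustrative sketch (not from GC.c): the small-bitmap convention used by
 * the RET_SMALL case above -- bits are consumed LSB first, a 0 bit means
 * "this word is a pointer, evacuate it", a 1 bit means "non-pointer, skip".
 * scan_small_bitmap and evac are hypothetical names; guarded with #if 0.
 */
#if 0
#include <stdint.h>

static void scan_small_bitmap(uintptr_t *payload, uintptr_t bitmap,
                              void (*evac)(uintptr_t *slot))
{
    uintptr_t *p = payload;
    while (bitmap != 0) {
        if ((bitmap & 1) == 0)
            evac(p);        /* cf. (StgClosure *)*p = evacuate(...) */
        p++;
        bitmap >>= 1;
    }
}
/* A large bitmap is handled the same way, one word of bits at a time,
 * treating any words beyond the last bitmap word as all-pointers. */
#endif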
3454 /*-----------------------------------------------------------------------------
3455 scavenge the large object list.
3457 evac_gen set by caller; similar games played with evac_gen as with
3458 scavenge() - see comment at the top of scavenge(). Most large
3459 objects are (repeatedly) mutable, so most of the time evac_gen will
3460 be zero.
3461 --------------------------------------------------------------------------- */
3464 scavenge_large(step *stp)
3469 bd = stp->new_large_objects;
3471 for (; bd != NULL; bd = stp->new_large_objects) {
3473 /* take this object *off* the large objects list and put it on
3474 * the scavenged large objects list. This is so that we can
3475 * treat new_large_objects as a stack and push new objects on
3476 * the front when evacuating.
3478 stp->new_large_objects = bd->link;
3479 dbl_link_onto(bd, &stp->scavenged_large_objects);
3481 // update the block count in this step.
3482 stp->n_scavenged_large_blocks += bd->blocks;
3484 p = bd->start;
3485 if (scavenge_one(p)) {
3486 mkMutCons((StgClosure *)p, stp->gen);
3491 /* -----------------------------------------------------------------------------
3492 Initialising the static object & mutable lists
3493 -------------------------------------------------------------------------- */
3496 zero_static_object_list(StgClosure* first_static)
3500 const StgInfoTable *info;
3502 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3503 info = get_itbl(p);
3504 link = STATIC_LINK(info, p);
3505 STATIC_LINK(info,p) = NULL;
3509 /* This function is only needed because we share the mutable link
3510 * field with the static link field in an IND_STATIC, so we have to
3511 * zero the mut_link field before doing a major GC, which needs the
3512 * static link field.
3514 * It doesn't do any harm to zero all the mutable link fields on the
3515 * mutable list.
3519 zero_mutable_list( StgMutClosure *first )
3521 StgMutClosure *next, *c;
3523 for (c = first; c != END_MUT_LIST; c = next) {
3524 next = c->mut_link;
3525 c->mut_link = NULL;
3529 /* -----------------------------------------------------------------------------
3530 Reverting CAFs
3531 -------------------------------------------------------------------------- */
3538 for (c = (StgIndStatic *)caf_list; c != NULL;
3539 c = (StgIndStatic *)c->static_link)
3541 c->header.info = c->saved_info;
3542 c->saved_info = NULL;
3543 // could, but not necessary: c->static_link = NULL;
3549 markCAFs( evac_fn evac )
3553 for (c = (StgIndStatic *)caf_list; c != NULL;
3554 c = (StgIndStatic *)c->static_link)
3556 evac(&c->indirectee);
3560 /* -----------------------------------------------------------------------------
3561 Sanity code for CAF garbage collection.
3563 With DEBUG turned on, we manage a CAF list in addition to the SRT
3564 mechanism. After GC, we run down the CAF list and blackhole any
3565 CAFs which have been garbage collected. This means we get an error
3566 whenever the program tries to enter a garbage collected CAF.
3568 Any garbage collected CAFs are taken off the CAF list at the same
3569 time.
3570 -------------------------------------------------------------------------- */
3572 #if 0 && defined(DEBUG)
3579 const StgInfoTable *info;
3590 ASSERT(info->type == IND_STATIC);
3592 if (STATIC_LINK(info,p) == NULL) {
3593 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3595 SET_INFO(p,&stg_BLACKHOLE_info);
3596 p = STATIC_LINK2(info,p);
3600 pp = &STATIC_LINK2(info,p);
3607 // belch("%d CAFs live", i);
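/* Illustrative sketch (not from GC.c): the shape of the DEBUG sweep
 * described above -- walk the CAF list with a pointer-to-pointer,
 * blackholing and unlinking entries the GC did not retain. caf_t and its
 * fields are hypothetical stand-ins; guarded with #if 0.
 */
#if 0
#include <stddef.h>

typedef struct caf {
    struct caf *static_link;  /* non-NULL iff retained by this GC */
    struct caf *list_link;    /* the debug-only CAF list itself */
    int         blackholed;
} caf_t;

static caf_t *caf_list_sketch;

static void sweep_cafs(void)
{
    caf_t **pp = &caf_list_sketch;
    while (*pp != NULL) {
        caf_t *p = *pp;
        if (p->static_link == NULL) {  /* garbage collected */
            p->blackholed = 1;         /* cf. SET_INFO(p,&stg_BLACKHOLE_info) */
            *pp = p->list_link;        /* take it off the CAF list */
        } else {
            pp = &p->list_link;        /* keep it, advance */
        }
    }
}
#endif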
3612 /* -----------------------------------------------------------------------------
3613 Lazy black holing.
3615 Whenever a thread returns to the scheduler after possibly doing
3616 some work, we have to run down the stack and black-hole all the
3617 closures referred to by update frames.
3618 -------------------------------------------------------------------------- */
3621 threadLazyBlackHole(StgTSO *tso)
3623 StgUpdateFrame *update_frame;
3624 StgBlockingQueue *bh;
3627 stack_end = &tso->stack[tso->stack_size];
3628 update_frame = tso->su;
3631 switch (get_itbl(update_frame)->type) {
3634 update_frame = ((StgCatchFrame *)update_frame)->link;
3638 bh = (StgBlockingQueue *)update_frame->updatee;
3640 /* if the thunk is already blackholed, it means we've also
3641 * already blackholed the rest of the thunks on this stack,
3642 * so we can stop early.
3644 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3645 * don't interfere with this optimisation.
3647 if (bh->header.info == &stg_BLACKHOLE_info) {
3651 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3652 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3653 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3654 belch("Unexpected lazy BHing required at %p",bh);
3658 // We pretend that bh is now dead.
3659 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
3661 SET_INFO(bh,&stg_BLACKHOLE_info);
3664 // We pretend that bh has just been created.
3665 LDV_recordCreate(bh);
3669 update_frame = update_frame->link;
3673 update_frame = ((StgSeqFrame *)update_frame)->link;
3679 barf("threadLazyBlackHole: weird update frame");
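/* Illustrative sketch (not from GC.c): the early-stop walk above in
 * miniature -- blackhole the updatee of each update frame, stopping at the
 * first one that is already a blackhole, since everything below it was
 * handled by an earlier pause. frame_t/thunk_t are hypothetical; #if 0.
 */
#if 0
#include <stddef.h>

typedef struct { int blackholed; } thunk_t;
typedef struct frame {
    struct frame *link;   /* the next update frame down the stack */
    thunk_t      *updatee;
} frame_t;

static void lazy_blackhole(frame_t *f)
{
    for (; f != NULL; f = f->link) {
        if (f->updatee->blackholed)
            return;                  /* the rest was done last time */
        f->updatee->blackholed = 1;  /* cf. SET_INFO(bh,&stg_BLACKHOLE_info) */
    }
}
#endif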
3685 /* -----------------------------------------------------------------------------
3686 * Stack squeezing
3688 * Code largely pinched from old RTS, then hacked to bits. We also do
3689 * lazy black holing here.
3691 * -------------------------------------------------------------------------- */
3694 threadSqueezeStack(StgTSO *tso)
3696 lnat displacement = 0;
3697 StgUpdateFrame *frame;
3698 StgUpdateFrame *next_frame; // Temporally next
3699 StgUpdateFrame *prev_frame; // Temporally previous
3701 rtsBool prev_was_update_frame;
3703 StgUpdateFrame *top_frame;
3704 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3705 bhs=0, squeezes=0;
3706 void printObj( StgClosure *obj ); // from Printer.c
3708 top_frame = tso->su;
3711 bottom = &(tso->stack[tso->stack_size]);
3714 /* There must be at least one frame, namely the STOP_FRAME.
3716 ASSERT((P_)frame < bottom);
3718 /* Walk down the stack, reversing the links between frames so that
3719 * we can walk back up as we squeeze from the bottom. Note that
3720 * next_frame and prev_frame refer to next and previous as they were
3721 * added to the stack, rather than the way we see them in this
3722 * walk. (It makes the next loop less confusing.)
3724 * Stop if we find an update frame pointing to a black hole
3725 * (see comment in threadLazyBlackHole()).
3729 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3730 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3731 prev_frame = frame->link;
3732 frame->link = next_frame;
3733 next_frame = frame;
3734 frame = prev_frame;
3737 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3738 printObj((StgClosure *)prev_frame);
3739 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3742 switch (get_itbl(frame)->type) {
3745 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3758 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3760 printObj((StgClosure *)prev_frame);
3763 if (get_itbl(frame)->type == UPDATE_FRAME
3764 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3769 /* Now, we're at the bottom. Frame points to the lowest update
3770 * frame on the stack, and its link actually points to the frame
3771 * above. We have to walk back up the stack, squeezing out empty
3772 * update frames and turning the pointers back around on the way
3775 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3776 * we never want to eliminate it anyway. Just walk one step up
3777 * before starting to squeeze. When you get to the topmost frame,
3778 * remember that there are still some words above it that might have
3779 * to be moved.
3785 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3788 * Loop through all of the frames (everything except the very
3789 * bottom). Things are complicated by the fact that we have
3790 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3791 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3793 while (frame != NULL) {
3795 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3796 rtsBool is_update_frame;
3798 next_frame = frame->link;
3799 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3801 /* Check to see if:
3802 * 1. both the previous and current frame are update frames
3803 * 2. the current frame is empty
3805 if (prev_was_update_frame && is_update_frame &&
3806 (P_)prev_frame == frame_bottom + displacement) {
3808 // Now squeeze out the current frame
3809 StgClosure *updatee_keep = prev_frame->updatee;
3810 StgClosure *updatee_bypass = frame->updatee;
3813 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3817 /* Deal with blocking queues. If both updatees have blocked
3818 * threads, then we should merge the queues into the update
3819 * frame that we're keeping.
3821 * Alternatively, we could just wake them up: they'll just go
3822 * straight to sleep on the proper blackhole! This is less code
3823 * and probably less bug prone, although it's probably much
3824 * slower.
3826 #if 0 // do it properly...
3827 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3828 # error Unimplemented lazy BH warning. (KSW 1999-01)
3830 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3831 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3833 // Sigh. It has one. Don't lose those threads!
3834 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3835 // Urgh. Two queues. Merge them.
3836 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3838 while (keep_tso->link != END_TSO_QUEUE) {
3839 keep_tso = keep_tso->link;
3841 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3844 // For simplicity, just swap the BQ for the BH
3845 P_ temp = updatee_keep;
3847 updatee_keep = updatee_bypass;
3848 updatee_bypass = temp;
3850 // Record the swap in the kept frame (below)
3851 prev_frame->updatee = updatee_keep;
3856 TICK_UPD_SQUEEZED();
3857 /* wasn't there something about update squeezing and ticky to be
3858 * sorted out? oh yes: we aren't counting each enter properly
3859 * in this case. See the log somewhere. KSW 1999-04-21
3861 * Check two things: that the two update frames don't point to
3862 * the same object, and that the updatee_bypass isn't already an
3863 * indirection. Both of these cases only happen when we're in a
3864 * black-hole-style loop (and there are multiple update frames
3865 * on the stack pointing to the same closure), but they can both
3866 * screw us up if we don't check.
3868 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3869 // this wakes the threads up
3870 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3873 sp = (P_)frame - 1; // sp = stuff to slide
3874 displacement += sizeofW(StgUpdateFrame);
3876 } else {
3877 // No squeeze for this frame
3878 sp = frame_bottom - 1; // Keep the current frame
3880 /* Do lazy black-holing.
3882 if (is_update_frame) {
3883 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3884 if (bh->header.info != &stg_BLACKHOLE_info &&
3885 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3886 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3887 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3888 belch("Unexpected lazy BHing required at %p",bh);
3891 /* zero out the slop so that the sanity checker can tell
3892 * where the next closure is.
3895 StgInfoTable *info = get_itbl(bh);
3896 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3897 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3898 * info is used for a different purpose, and it's exactly the
3899 * same size as a BLACKHOLE in any case.
3901 if (info->type != THUNK_SELECTOR) {
3902 for (i = np; i < np + nw; i++) {
3903 ((StgClosure *)bh)->payload[i] = 0;
3910 // We pretend that bh is now dead.
3911 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
3914 // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
3916 SET_INFO(bh,&stg_BLACKHOLE_info);
3919 // We pretend that bh has just been created.
3920 LDV_recordCreate(bh);
3925 // Fix the link in the current frame (should point to the frame below)
3926 frame->link = prev_frame;
3927 prev_was_update_frame = is_update_frame;
3930 // Now slide all words from sp up to the next frame
3932 if (displacement > 0) {
3933 P_ next_frame_bottom;
3935 if (next_frame != NULL)
3936 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3937 else
3938 next_frame_bottom = tso->sp - 1;
3942 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3946 while (sp >= next_frame_bottom) {
3947 sp[displacement] = *sp;
3948 sp -= 1;
3951 (P_)prev_frame = (P_)frame + displacement;
3952 frame = next_frame;
3955 tso->sp += displacement;
3956 tso->su = prev_frame;
3959 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames",
3960 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
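/* Illustrative sketch (not from GC.c): the essence of the squeeze above on
 * a linked chain of frames -- two consecutive update frames will update the
 * same value, so the bypassed updatee becomes an indirection to the kept one
 * and the inner frame is dropped. The stack-adjacency check, word sliding
 * and link reversal are omitted; all names are hypothetical; #if 0 guarded.
 */
#if 0
#include <stddef.h>

typedef struct closure {
    struct closure *ind_to;  /* non-NULL => already an indirection */
} closure_t;

typedef struct uf {
    struct uf *link;         /* frame below this one */
    closure_t *updatee;
    int        is_update;    /* vs. CATCH/SEQ/STOP frames */
} uf_t;

static void squeeze(uf_t **fp)
{
    while (*fp != NULL && (*fp)->link != NULL) {
        uf_t *f = *fp, *below = f->link;
        if (f->is_update && below->is_update) {
            /* cf. UPD_IND_NOLOCK(updatee_bypass, updatee_keep), with the
             * same two guards against black-hole-style loops */
            if (f->updatee != below->updatee && f->updatee->ind_to == NULL)
                f->updatee->ind_to = below->updatee;
            *fp = below;     /* drop the squeezed frame */
        } else {
            fp = &f->link;
        }
    }
}
#endif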
3965 /* -----------------------------------------------------------------------------
3966 * Pausing a thread
3968 * We have to prepare for GC - this means doing lazy black holing
3969 * here. We also take the opportunity to do stack squeezing if it's
3970 * turned on.
3971 * -------------------------------------------------------------------------- */
3973 threadPaused(StgTSO *tso)
3975 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3976 threadSqueezeStack(tso); // does black holing too
3977 else
3978 threadLazyBlackHole(tso);
3981 /* -----------------------------------------------------------------------------
3982 * Debugging
3983 * -------------------------------------------------------------------------- */
3987 printMutOnceList(generation *gen)
3989 StgMutClosure *p, *next;
3991 p = gen->mut_once_list;
3992 next = p->mut_link;
3994 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3995 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3996 fprintf(stderr, "%p (%s), ",
3997 p, info_type((StgClosure *)p));
3999 fputc('\n', stderr);
4003 printMutableList(generation *gen)
4005 StgMutClosure *p, *next;
4007 p = gen->mut_list;
4008 next = p->mut_link;
4010 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
4011 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
4012 fprintf(stderr, "%p (%s), ",
4013 p, info_type((StgClosure *)p));
4015 fputc('\n', stderr);
4018 static inline rtsBool
4019 maybeLarge(StgClosure *closure)
4021 StgInfoTable *info = get_itbl(closure);
4023 /* closure types that may be found on the new_large_objects list;
4024 see scavenge_large */
4025 return (info->type == MUT_ARR_PTRS ||
4026 info->type == MUT_ARR_PTRS_FROZEN ||
4027 info->type == TSO ||
4028 info->type == ARR_WORDS);