/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.95 2001/02/08 18:04:49 sewardj Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for revertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
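
/* A minimal sketch of the consing operation described above, mirroring
 * the THUNK_STATIC case in evacuate() later in this file (illustrative
 * only, not an extra copy of the real code):
 */
#if 0
   if (major_gc && THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
     THUNK_STATIC_LINK((StgClosure *)q) = static_objects; /* use the link field */
     static_objects = (StgClosure *)q;	/* q becomes the new head of the list */
   }
#endif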
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	  /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static StgClosure * evacuate		    ( StgClosure *q );
static void	    zero_static_object_list ( StgClosure* first_static );
static void	    zero_mutable_list	    ( StgMutClosure *first );

static rtsBool	    traverse_weak_ptr_list  ( void );
static void	    cleanup_weak_ptr_list   ( StgWeak **list );

static void	    scavenge_stack	    ( StgPtr p, StgPtr stack_end );
static void	    scavenge_large	    ( step * );
static void	    scavenge		    ( step * );
static void	    scavenge_static	    ( void );
static void	    scavenge_mutable_list   ( generation *g );
static void	    scavenge_mut_once_list  ( generation *g );

static void	    gcCAFs		    ( void );

void revertCAFs   ( void );
void scavengeCAFs ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
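
/* The overall shape of the algorithm above, as a sketch.  The helper
 * names here are illustrative only, not real functions in this file:
 */
#if 0
   evacuate_roots();			/* follow the root set */
   do {
     flag = scavenge_all_steps();	/* may evacuate further objects */
   } while (flag);			/* repeat until a fixpoint */
   free_from_space_and_flip();		/* from-space = to-space */
#endif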
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  rtsBool flag;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen->no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;	/* it's a to-space block */
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->scan = bd->start;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->evacuated = 0;	/* *not* a to-space block */
        stp->hpLim = stp->hp + BLOCK_SIZE_W;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan_bd = stp->hp_bd;
      stp->to_space = NULL;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);
  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   */
  extern void markHugsObjects(void);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
	     checkStaticObjects());
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  {
    int gen;
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
        stp = &generations[gen].steps[st];
        evac_gen = gen;
        if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
          scavenge(stp);
          flag = rtsTrue;
        }
        if (stp->new_large_objects != NULL) {
          scavenge_large(stp);
          flag = rtsTrue;
        }
      }
    }
  }

  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
	      RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
	   (int)oldest_gen->steps[0].to_blocks) <
	  (RtsFlags.GcFlags.pcFreeHeap *
	   RtsFlags.GcFlags.maxHeapSize / 200)) {
	heapOverflow();
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(stp->blocks);
	  stp->blocks = stp->to_space;
	  stp->n_blocks = stp->to_blocks;
	  stp->to_space = NULL;
	  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	stp->large_objects = stp->scavenged_large_objects;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *                  oldest_gen->max_blocks * g
	 *  max_blocks  =  -----------------------------------
	 *                  RtsFlags.GcFlags.generations - 1
	 */
	if (g != 0) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	    / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
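
	/* Worked example (illustrative numbers only): with 6 generations
	 * and oldest_gen->max_blocks = 1000, generation 2 gets
	 * 1000 * 2 / 5 = 400 blocks.
	 */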
      /* for older generations... */
      if (g > N) {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	stp->n_blocks += stp->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
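
    /* Illustrative numbers for the sizing rule above: with L = 100
     * blocks of live data, a full-sized (factor-2) setup needs roughly
     * 100 (live) + 100 (nursery) + 100 (to-space) = 3L = 300 blocks;
     * shrinking the nursery as we approach the heap limit brings the
     * requirement down towards 2L.
     */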
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);
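
      /* Worked example (illustrative numbers only): suggested = 1000
       * blocks, needed = 200, g0s0_pcnt_kept = 25  =>
       * blocks = (1000 - 200) * 100 / 125 = 640.
       */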
      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  /* mark the garbage collected CAFs as dead */
#if defined(DEBUG) && !defined(GHCI)
  if (major_gc) { gcCAFs(); } /* doesn't work w/ GHCI */
#endif

  /* zero the scavenged static object list */
  zero_static_object_list(scavenged_static_objects);

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
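
/* A sketch of how the collector drives this to a fixpoint (cf. the main
 * loop in GarbageCollect() above; scavenge_everything is an illustrative
 * name, not a real function in this file):
 */
#if 0
   while (traverse_weak_ptr_list()) {  /* rtsTrue => evacuated something */
     scavenge_everything();	       /* so scavenge again before retrying */
   }
#endif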
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->global_link;
	continue;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
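
/* Typical use, as in traverse_weak_ptr_list() above (sketch only):
 */
#if 0
   StgClosure *new;
   if ((new = isAlive(w->key)) != NULL) {
     w->key = new;		/* still alive: follow the new address */
   } else {
     /* dead: leave it for the pending-finalizer list */
   }
#endif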
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		/* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    case ARR_WORDS:
      size = arr_words_sizeW((StgArrWords *)p);
      goto large;

    case MUT_ARR_PTRS_FROZEN:
      size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
      goto large;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }
      size = tso_sizeW((StgTSO *)p);
    large:
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	  && Bdescr((P_)p)->evacuated)
	return p;
      else
	return NULL;

    default:
      /* dead */
      return NULL;
    }
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}
static void addBlock(step *stp)
{
  bdescr *bd = allocBlock();

  if (stp->gen->no <= N) {
    bd->evacuated = 1;		/* it's a to-space block */
  } else {
    bd->evacuated = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
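
/* How this is used from evacuate() below (sketch; cf. the ARR_WORDS and
 * TSO cases there): objects over LARGE_OBJECT_THRESHOLD are not copied
 * at all, only their block descriptors are relinked:
 */
#if 0
   if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
     evacuate_large((P_)q, rtsFalse);	/* just relink the block */
     return q;				/* the address is unchanged */
   }
#endif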
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen = stp->gen;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
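
/* How this gets used (sketch; cf. the end of scavenge() below): when an
 * object fails to promote everything it points to, we wrap it:
 */
#if 0
   if (failed_to_evac) {
     mkMutCons((StgClosure *)q, &generations[evac_gen]);
     failed_to_evac = rtsFalse;
   }
#endif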
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	/* nope */
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    stp = bd->step->to;
  }
  else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case MUT_VAR:
    ASSERT(q->header.info != &stg_MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  /* unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE && */
	  (StgChar)w <= MAX_CHARLIKE) {
	return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      /* else, fall through ... */
    }
  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* a revertible CAF - it'll be on the CAF list, so don't do
     * anything with it here (we'll scavenge it later).
     */
    if (((StgIndStatic *)q)->saved_info != NULL) {
      return q;
    }
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,stp);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,stp);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	return q;
      } else {
	/* just copy the block */
	to = copy(q,size,stp);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
	return to;
      }
    }
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    }
#endif
  }
}
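
/* The untagged and non-Win32 branches are elided from the function
 * above; presumably they just evacuate the SRT entry directly.  A
 * sketch of the assumed shape (not the verbatim original):
 */
#if 0
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);		/* closure fixed at link-time */
    }
#endif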
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case FUN_0_2:
    case THUNK_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN_1_1:
    case THUNK_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (stp->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }
    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");
    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif
    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
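
/* How this is used (sketch; cf. scavenge_mut_once_list() below): a
 * MUT_CONS stays on the mutable list only if promotion failed:
 */
#if 0
   if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
     /* didn't manage to promote everything: keep the MUT_CONS */
     p->mut_link = new_list;
     new_list = p;
   }
#endif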
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);
  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
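
/* These two functions are driven from GarbageCollect() above, roughly
 * like this (sketch only; the real driver also rescans each step):
 */
#if 0
   for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
     scavenge_mut_once_list(&generations[g]);
     scavenge_mutable_list(&generations[g]);
   }
#endif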
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 0.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
2424 //@cindex scavenge_mutable_list
2427 scavenge_mutable_list(generation *gen)
2429 const StgInfoTable *info;
2430 StgMutClosure *p, *next;
2432 p = gen->saved_mut_list;
2436 failed_to_evac = rtsFalse;
2438 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2440 /* make sure the info pointer is into text space */
2441 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2442 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2446 if (info->type==RBH)
2447 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2449 switch(info->type) {
2451 case MUT_ARR_PTRS_FROZEN:
2452 /* remove this guy from the mutable list, but follow the ptrs
2453 * anyway (and make sure they get promoted to this gen).
2458 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2460 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2461 (StgClosure *)*q = evacuate((StgClosure *)*q);
2465 if (failed_to_evac) {
2466 failed_to_evac = rtsFalse;
2467 p->mut_link = gen->mut_list;
2474 /* follow everything */
2475 p->mut_link = gen->mut_list;
2480 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2481 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2482 (StgClosure *)*q = evacuate((StgClosure *)*q);
2488 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2489 * it from the mutable list if possible by promoting whatever it
2492 ASSERT(p->header.info != &stg_MUT_CONS_info);
2493 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2494 p->mut_link = gen->mut_list;
2500 StgMVar *mvar = (StgMVar *)p;
2501 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2502 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2503 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2504 p->mut_link = gen->mut_list;
2511 StgTSO *tso = (StgTSO *)p;
2515 /* Don't take this TSO off the mutable list - it might still
2516 * point to some younger objects (because we set evac_gen to 0
2519 tso->mut_link = gen->mut_list;
2520 gen->mut_list = (StgMutClosure *)tso;
2526 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2527 (StgClosure *)bh->blocking_queue =
2528 evacuate((StgClosure *)bh->blocking_queue);
2529 p->mut_link = gen->mut_list;
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_once_list;
        gen->mut_once_list = p;
      } else {
        ((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // char str[80];
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2563 StgRBH *rbh = (StgRBH *)p;
2564 (StgClosure *)rbh->blocking_queue =
2565 evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
2578 /* follow the pointer to the node which is being demanded */
2579 (StgClosure *)bf->node =
2580 evacuate((StgClosure *)bf->node);
2581 /* follow the link to the rest of the blocking queue */
2582 (StgClosure *)bf->link =
2583 evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
2594 break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2599 (StgClosure *)fmbq->blocking_queue =
2600 evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif

    default:
      /* shouldn't have anything else on the mutable list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
2617 //@cindex scavenge_static
static void
scavenge_static(void)
{
2622 StgClosure* p = static_objects;
2623 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2638 /* make sure the info pointer is into text space */
2639 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2640 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2642 /* Take this object *off* the static_objects list,
2643 * and put it on the scavenged_static_objects list.
2645 static_objects = STATIC_LINK(info,p);
2646 STATIC_LINK(info,p) = scavenged_static_objects;
2647 scavenged_static_objects = p;
    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
2654 ind->indirectee = evacuate(ind->indirectee);
2656 /* might fail to evacuate it, in which case we have to pop it
2657 * back on the mutable list (and take it off the
2658 * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
2661 if (failed_to_evac) {
2662 failed_to_evac = rtsFalse;
2663 scavenged_static_objects = STATIC_LINK(info,p);
2664 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2665 oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }
2688 barf("scavenge_static: strange closure %d", (int)(info->type));
2691 ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
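
/* The two static lists implement a simple worklist: evacuate() can cons
 * newly-reachable static closures onto static_objects while we are still
 * draining it, which is why the loop above re-reads the global each time
 * round.  The same pattern in miniature (hypothetical helper names, for
 * illustration only):
 */
#if 0
  while (worklist != END_OF_STATIC_LIST) {
    StgClosure *obj = worklist;     /* pop one object off the front...     */
    worklist = link_of(obj);
    push_onto_done_list(obj);       /* its mark is zeroed again after GC   */
    scan(obj);                      /* may push new objects onto worklist! */
  }
#endif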
2701 /* -----------------------------------------------------------------------------
2702 scavenge_stack walks over a section of stack and evacuates all the
2703 objects pointed to by it. We can use the same code for walking
2704 PAPs, since these are just sections of copied stack.
2705 -------------------------------------------------------------------------- */
2706 //@cindex scavenge_stack
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;
2715 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *(StgPtr *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }
    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
        ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);
2754 switch (info->type) {
      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;
      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
        StgPtr old_p = p;
        p++; p++;
        IF_DEBUG(sanity,
                 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                       old_p, p, old_p+1));
#else
        p++; /* what if FHS!=1 !? -- HWL */
#endif
        goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;
2788 p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *stp;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }
          /* Don't promote blackholes */
          stp = bd->step;
          if (!(stp->gen->no == 0 &&
                stp->no != 0 &&
                stp->no == stp->gen->n_steps-1)) {
            stp = stp->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), stp);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
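
      /* The bitmap convention is easy to get backwards: a *clear* bit means
       * "this word is a pointer, evacuate it"; a set bit means "non-pointer,
       * skip".  The loop also stops as soon as the bitmap runs out of set
       * bits: any pointer words after the last non-pointer simply fall
       * through to the generic pointer test at the top of the outer loop.
       * A worked example (illustrative only):
       */
#if 0
      bitmap = 0x6;   /* binary 0110: words 1 and 2 are non-pointers     */
      /* round 1: bit 0 clear -> evacuate word 0; bitmap becomes 0x3     */
      /* round 2: bit 0 set   -> skip word 1;     bitmap becomes 0x1     */
      /* round 3: bit 0 set   -> skip word 2;     bitmap becomes 0x0     */
      /* loop exits; word 3 (a pointer) is handled by the outer loop     */
#endif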
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }
        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
2892 /*-----------------------------------------------------------------------------
2893 scavenge the large object list.
2895 evac_gen set by caller; similar games played with evac_gen as with
2896 scavenge() - see comment at the top of scavenge(). Most large
   objects are (repeatedly) mutable, so most of the time evac_gen will
   be zero.
2899 --------------------------------------------------------------------------- */
2900 //@cindex scavenge_large
static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;
2907 const StgInfoTable* info;
2908 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2910 evac_gen = 0; /* most objects are mutable */
2911 bd = stp->new_large_objects;
2913 for (; bd != NULL; bd = stp->new_large_objects) {
2915 /* take this object *off* the large objects list and put it on
2916 * the scavenged large objects list. This is so that we can
2917 * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
2920 stp->new_large_objects = bd->link;
2921 dbl_link_onto(bd, &stp->scavenged_large_objects);
    p = bd->start;
    info = get_itbl((StgClosure *)p);
2926 switch (info->type) {
      /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;
    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }
    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;

        evac_gen = saved_evac_gen; /* not really mutable */
        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        evac_gen = 0;
        continue;
      }
2980 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
2985 //@cindex zero_static_object_list
static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
3001 /* This function is only needed because we share the mutable link
3002 * field with the static link field in an IND_STATIC, so we have to
3003 * zero the mut_link field before doing a major GC, which needs the
3004 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL; c = (StgIndStatic *)c->static_link) {
        c->header.info = c->saved_info;
        c->saved_info = NULL;
        /* could, but not necessary: c->static_link = NULL; */
    }
    caf_list = NULL;
}
void
scavengeCAFs( void )
{
    StgIndStatic *c;

    evac_gen = 0;
    for (c = (StgIndStatic *)caf_list; c != NULL; c = (StgIndStatic *)c->static_link) {
        c->indirectee = evacuate(c->indirectee);
    }
}
3053 /* -----------------------------------------------------------------------------
3054 Sanity code for CAF garbage collection.
3056 With DEBUG turned on, we manage a CAF list in addition to the SRT
3057 mechanism. After GC, we run down the CAF list and blackhole any
3058 CAFs which have been garbage collected. This means we get an error
3059 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG
//@cindex gcCAFs
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }

  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
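
/* The pp/STATIC_LINK2 dance above is the classic unlink-or-advance pattern
 * for filtering a singly-linked list in place.  The same idea in generic
 * form (hypothetical node type, for illustration only):
 */
#if 0
struct node { struct node *next; int dead; };

static void filter_list(struct node **pp)
{
  struct node *p;
  while ((p = *pp) != NULL) {
    if (p->dead) {
      *pp = p->next;          /* unlink: the predecessor now skips p   */
    } else {
      pp = &p->next;          /* advance: p's next field becomes the   */
    }                         /* "predecessor link" to patch next time */
  }
}
#endif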
3105 //@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
3106 //@subsection Lazy black holing
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
3112 some work, we have to run down the stack and black-hole all the
3113 closures referred to by update frames.
3114 -------------------------------------------------------------------------- */
3115 //@cindex threadLazyBlackHole
void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;
3124 stack_end = &tso->stack[tso->stack_size];
3125 update_frame = tso->su;
  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;
    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;
3137 /* if the thunk is already blackholed, it means we've also
3138 * already blackholed the rest of the thunks on this stack,
3139 * so we can stop early.
3141 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
        return;
      }

3148 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3149 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3150 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;
    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}
3171 //@node Stack squeezing, Pausing a thread, Lazy black holing
3172 //@subsection Stack squeezing
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
3178 * lazy black holing here.
3180 * -------------------------------------------------------------------------- */
3181 //@cindex threadSqueezeStack
void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                   /* Temporally next */
  StgUpdateFrame *prev_frame;                   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      squeezes = 0, bhs = 0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
3208 /* Walk down the stack, reversing the links between frames so that
3209 * we can walk back up as we squeeze from the bottom. Note that
3210 * next_frame and prev_frame refer to next and previous as they were
3211 * added to the stack, rather than the way we see them in this
3212 * walk. (It makes the next loop less confusing.)
3214 * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
      printObj((StgClosure *)prev_frame);
      barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
           frame, prev_frame);
    }
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &stg_BLACKHOLE_info)
        bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
           frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &stg_BLACKHOLE_info) {
      break;
    }
  }
3259 /* Now, we're at the bottom. Frame points to the lowest update
3260 * frame on the stack, and its link actually points to the frame
3261 * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
3266 * we never want to eliminate it anyway. Just walk one step up
3267 * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;
3288 next_frame = frame->link;
3289 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {
3298 /* Now squeeze out the current frame */
3299 StgClosure *updatee_keep = prev_frame->updatee;
3300 StgClosure *updatee_bypass = frame->updatee;
#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif
3307 /* Deal with blocking queues. If both updatees have blocked
3308 * threads, then we should merge the queues into the update
3309 * frame that we're keeping.
3311 * Alternatively, we could just wake them up: they'll just go
3312 * straight to sleep on the proper blackhole! This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
3316 #if 0 /* do it properly... */
3317 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
# error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
3320 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
          ) {
3323 /* Sigh. It has one. Don't lose those threads! */
3324 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3325 /* Urgh. Two queues. Merge them. */
3326 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3328 while (keep_tso->link != END_TSO_QUEUE) {
3329 keep_tso = keep_tso->link;
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif
3346 TICK_UPD_SQUEEZED();
3347 /* wasn't there something about update squeezing and ticky to be
3348 * sorted out? oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
3353 sp = (P_)frame - 1; /* sp = stuff to slide */
3354 displacement += sizeofW(StgUpdateFrame);
    } else {
      /* No squeeze for this frame */
3358 sp = frame_bottom - 1; /* Keep the current frame */
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
3363 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3364 if (bh->header.info != &stg_BLACKHOLE_info &&
3365 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3366 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3367 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          /* zero out the slop so that the sanity checker can tell
           * where the next closure is.
           */
          {
            StgInfoTable *info = get_itbl(bh);
3376 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
            /* don't zero out slop for a THUNK_SELECTOR, because its layout
             * info is used for a different purpose, and it's exactly the
             * same size as a BLACKHOLE in any case.
             */
3381 if (info->type != THUNK_SELECTOR) {
3382 for (i = np; i < np + nw; i++) {
                ((StgClosure *)bh)->payload[i] = 0;
              }
            }
          }
          SET_INFO(bh,&stg_BLACKHOLE_info);
        }
      }
3392 /* Fix the link in the current frame (should point to the frame below) */
3393 frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
3397 /* Now slide all words from sp up to the next frame */
3399 if (displacement > 0) {
3400 P_ next_frame_bottom;
      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame) - 1;
      else
        next_frame_bottom = tso->sp - 1;
#if DEBUG
      IF_DEBUG(gc,
               fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                       displacement))
#endif
3413 while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }
3422 tso->sp += displacement;
3423 tso->su = prev_frame;
#if DEBUG
  IF_DEBUG(gc,
           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}
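
/* The slide loop above is an overlapping upward copy: every word between
 * the next frame and the current one moves up by `displacement' words.
 * An equivalent, illustrative formulation (hypothetical helper):
 */
#if 0
static void slide_words_up(StgPtr lo, StgPtr hi, lnat displacement)
{
  StgPtr sp;
  /* copy from the top down, so the overlapping regions don't collide */
  for (sp = hi; sp >= lo; sp--) {
    sp[displacement] = *sp;
  }
}
#endif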
3431 //@node Pausing a thread, Index, Stack squeezing
3432 //@subsection Pausing a thread
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
3441 //@cindex threadPaused
void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
3456 //@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
3473 //@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
3490 //@cindex maybeLarge
3491 static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);
3496 /* closure types that may be found on the new_large_objects list;
3497 see scavenge_large */
3498 return (info->type == MUT_ARR_PTRS ||
3499 info->type == MUT_ARR_PTRS_FROZEN ||
3500 info->type == TSO ||
          info->type == ARR_WORDS);
}
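
/* maybeLarge() exists purely for sanity checking.  A typical use would be
 * asserting that everything on a step's large-object list is of a type
 * that scavenge_large() knows how to handle (illustrative only):
 */
#if 0
  for (bd = stp->new_large_objects; bd != NULL; bd = bd->link) {
    ASSERT(maybeLarge((StgClosure *)bd->start));
  }
#endif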
3507 //@node Index, , Pausing a thread
3511 //* GarbageCollect:: @cindex\s-+GarbageCollect
3512 //* MarkRoot:: @cindex\s-+MarkRoot
3513 //* RevertCAFs:: @cindex\s-+RevertCAFs
3514 //* addBlock:: @cindex\s-+addBlock
3515 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3516 //* copy:: @cindex\s-+copy
3517 //* copyPart:: @cindex\s-+copyPart
3518 //* evacuate:: @cindex\s-+evacuate
3519 //* evacuate_large:: @cindex\s-+evacuate_large
3520 //* gcCAFs:: @cindex\s-+gcCAFs
3521 //* isAlive:: @cindex\s-+isAlive
3522 //* maybeLarge:: @cindex\s-+maybeLarge
3523 //* mkMutCons:: @cindex\s-+mkMutCons
3524 //* printMutOnceList:: @cindex\s-+printMutOnceList
3525 //* printMutableList:: @cindex\s-+printMutableList
3526 //* relocate_TSO:: @cindex\s-+relocate_TSO
3527 //* scavenge:: @cindex\s-+scavenge
3528 //* scavenge_large:: @cindex\s-+scavenge_large
3529 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3530 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3531 //* scavenge_one:: @cindex\s-+scavenge_one
3532 //* scavenge_srt:: @cindex\s-+scavenge_srt
3533 //* scavenge_stack:: @cindex\s-+scavenge_stack
3534 //* scavenge_static:: @cindex\s-+scavenge_static
3535 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3536 //* threadPaused:: @cindex\s-+threadPaused
3537 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3538 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3539 //* upd_evacuee:: @cindex\s-+upd_evacuee
3540 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
3541 //* zero_static_object_list:: @cindex\s-+zero_static_object_list