/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.87 2000/11/13 14:40:37 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects;  /* static objects scavenged so far */
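
/* A minimal, self-contained sketch of the list discipline described
 * above (illustrative only -- "Closure" stands in for StgClosure, and
 * the sentinel plays the role of END_OF_STATIC_LIST).  The key points:
 * the end marker is non-NULL, and a non-zero link field means "already
 * on a list", which is what breaks cycles.
 */
#if 0
struct Closure {
    struct Closure *static_link;   /* zero means: not on any list */
};

#define END_OF_LIST ((struct Closure *)1)  /* non-NULL end marker */

static struct Closure *live_static = END_OF_LIST;

static void add_static (struct Closure *c)
{
    if (c->static_link == NULL) {      /* not yet listed: cons it on */
        c->static_link = live_static;
        live_static = c;
    }
}
#endif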
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		/* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30;	/* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
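
/* A condensed sketch of the destination rule above (illustrative only;
 * no such helper exists in this file -- the real code precomputes the
 * destination in bd->step->to and adjusts it in copy()):
 */
#if 0
static step *
evac_dest (step *stp)
{
    if (stp->no < stp->gen->n_steps-1) {
        /* a higher step exists in this generation */
        return &stp->gen->steps[stp->no+1];
    } else if (stp->gen->no < RtsFlags.GcFlags.generations-1) {
        /* no higher step: promote to the next generation */
        return &generations[stp->gen->no+1].steps[0];
    } else {
        /* oldest generation collected: evacuate to its own to-space */
        return stp;
    }
}
#endif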
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     CurrentTime[CurrentProc], CurrentTime[CurrentProc]));
#endif
  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen  = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp        = bd->start;
      step->hpLim     = step->hp + BLOCK_SIZE_W;
      step->hp_bd     = bd;
      step->to_space  = bd;
      step->to_blocks = 1;
      step->scan      = bd->start;
      step->scan_bd   = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
	bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
	bd = allocBlock();
	bd->gen = &generations[g];
	bd->step = step;
	bd->link = NULL;
	bd->evacuated = 0;	/* *not* a to-space block */
	step->hp = bd->start;
	step->hpLim = step->hp + BLOCK_SIZE_W;
	step->hp_bd = bd;
	step->blocks = bd;
	step->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    /* Do the mut-once lists first */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity,
	       checkStaticObjects());
      scavenge_static();
    }
    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    {
      int gen, st;
    loop2:
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
	for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  step = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	    scavenge(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (step->new_large_objects != NULL) {
	    scavenge_large(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }
    if (flag) { goto loop; }
    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);
#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }
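
  /* Worked example (illustrative, assuming the default oldGenFactor
   * of 2): 100 blocks of old-generation live data gives max_blocks =
   * max(200, minOldGenSize); if that exceeds maxHeapSize/2 it is
   * clamped to maxHeapSize/2, and heapOverflow() is called when the
   * remaining headroom drops below pcFreeHeap percent of half the
   * maximum heap.
   */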
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp_bd->free;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(step->blocks);
	  step->blocks = step->to_space;
	  step->n_blocks = step->to_blocks;
	  step->to_space = NULL;
	  step->to_blocks = 0;
	  for (bd = step->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	step->large_objects = step->scavenged_large_objects;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =    oldgen_max_blocks * G
	 *                 ----------------------
	 *                 number of generations - 1
	 */
	if (RtsFlags.GcFlags.generations > 1) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	    / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
      /* for older generations... */
      } else {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = step->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &step->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
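
    /* Worked example (illustrative): with maxHeapSize = 1000 blocks,
     * oldGenFactor = 2 and L = 300 live blocks, 300*2*2 = 1200 > 1000,
     * so adjusted_blocks = 1000 - 2*300 = 400 and pc_free = 40%; the
     * nursery shrinks to 400 blocks, so 2L for copying plus the
     * nursery just fits inside the maximum heap.
     */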
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded(); 	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
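
  /* Worked example for the formula above (illustrative): with a
   * suggested heap of 1000 blocks, needed = 200 and g0s0_pcnt_kept =
   * 30, the allocation area becomes (1000-200)*100/130 ~= 615 blocks;
   * roughly 30% of it survives the next minor GC, so the total comes
   * out close to 200 + 615*1.3 ~= 1000 blocks, i.e. the suggestion.
   */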
  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  resetNurseries();

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

#ifdef DEBUG
  /* symbol-table based profiling */
  /*  heapCensus(to_space); */ /* ToDo */
#endif

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
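
/* The rule implemented below, as a condensed sketch (illustrative
 * only): a weak pointer keeps its value and finalizer alive iff its
 * key is alive.  One pass can make previously-dead keys reachable, so
 * the caller iterates this to a fixed point (the "must be last" loop
 * in GarbageCollect).  The real code below also unlinks dead entries
 * and handles the EVACUATED and DEAD_WEAK forms, which this sketch
 * omits.
 */
#if 0
static rtsBool
weak_ptr_pass_sketch (void)
{
    StgWeak *w;
    rtsBool progress = rtsFalse;
    for (w = old_weak_ptr_list; w; w = w->link) {
	if (isAlive(w->key)) {
	    w->value     = evacuate(w->value);    /* roots, given a live key */
	    w->finalizer = evacuate(w->finalizer);
	    progress = rtsTrue;   /* may have made more keys reachable */
	}
    }
    return progress;
}
#endif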
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	next = t->global_link;
	*prev = next;
	continue;
      default: ;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads  = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

 loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  case BCO:
    size = bco_sizeW((StgBCO*)p);
    goto large;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }

    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	&& Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    /* dead. */
    return NULL;
  }
}
//@cindex MarkRoot

StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}
//@cindex addBlock

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;

  step->to_blocks++;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
//@cindex copy

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

//@cindex copyPart

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
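
/* Example of when this happens (illustrative): scavenge() walks an
 * old-generation object and fails to promote something it points to
 * (failed_to_evac); the object is then wrapped in a MUT_CONS on
 * generations[evac_gen].mut_once_list, so the old-to-new pointer is
 * revisited at the next GC -- see the failed_to_evac check at the
 * bottom of scavenge() below.
 */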
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     then not collected
           else evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
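
/* Worked example (illustrative): with three generations, collecting up
 * to N = 1 while scavenging generation 1 (so evac_gen = 1):
 *   - an object in generation 2 (M > N) is left where it is;
 *   - an object in generation 1 (M = evac_gen) goes to its step's
 *     to-space via step->to;
 *   - an object in generation 0 (M < evac_gen) is evacuated straight
 *     to generation 1, step 0; if it turns out to have been evacuated
 *     to generation 0 already, failed_to_evac is set instead.
 */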
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	/* nope */
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &stg_MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);
  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case CAF_ENTERED:
	selectee = ((StgCAF *)selectee)->value;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,step);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,step);
      }
    }
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	p += bco_sizeW(bco);
	break;
      }
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }
    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case FUN_0_2:
    case THUNK_0_2:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN_1_1:
    case THUNK_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (step->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;
    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }
    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }
    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// char str[80];
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif
2217 barf("scavenge: unimplemented/strange closure type %d @ %p",
2221 barf("scavenge: unimplemented/strange closure type %d @ %p",
2225 /* If we didn't manage to promote all the objects pointed to by
2226 * the current object, then we have to designate this object as
2227 * mutable (because it contains old-to-new generation pointers).
2229 if (failed_to_evac) {
2230 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2231 failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one
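
/* Used by scavenge_mut_once_list() on the target of a MUT_CONS:
 * returns rtsTrue ("no luck") if something the object points to could
 * not be promoted, in which case the MUT_CONS has to stay on the
 * mutable list for the next GC.
 */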
static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list
static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    switch(info->type) {
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

#ifdef DEBUG
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;
    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;
    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	} else {
	  p->mut_link = NULL;
	}
      }
      continue;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	} else {
	  p->mut_link = NULL;
	}
      }
      continue;
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    switch(info->type) {
    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
	StgPtr end, q;

	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	evac_gen = gen->no;
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	evac_gen = 0;

	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = gen->mut_list;
	  gen->mut_list = p;
	}
	continue;
      }
    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
	StgPtr end, q;

	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	continue;
      }
    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &stg_MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;
2563 StgMVar *mvar = (StgMVar *)p;
2564 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2565 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2566 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2567 p->mut_link = gen->mut_list;
2574 StgTSO *tso = (StgTSO *)p;
2578 /* Don't take this TSO off the mutable list - it might still
2579 * point to some younger objects (because we set evac_gen to 0
2582 tso->mut_link = gen->mut_list;
2583 gen->mut_list = (StgMutClosure *)tso;
2589 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2590 (StgClosure *)bh->blocking_queue =
2591 evacuate((StgClosure *)bh->blocking_queue);
2592 p->mut_link = gen->mut_list;
2597 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2600 case IND_OLDGEN_PERM:
2601 /* Try to pull the indirectee into this generation, so we can
2602 * remove the indirection from the mutable list.
2605 ((StgIndOldGen *)p)->indirectee =
2606 evacuate(((StgIndOldGen *)p)->indirectee);
2609 if (failed_to_evac) {
2610 failed_to_evac = rtsFalse;
2611 p->mut_link = gen->mut_once_list;
2612 gen->mut_once_list = p;
2619 // HWL: check whether all of these are necessary
2621 case RBH: // cf. BLACKHOLE_BQ
2623 // nat size, ptrs, nonptrs, vhs;
2625 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2626 StgRBH *rbh = (StgRBH *)p;
2627 (StgClosure *)rbh->blocking_queue =
2628 evacuate((StgClosure *)rbh->blocking_queue);
2629 if (failed_to_evac) {
2630 failed_to_evac = rtsFalse;
2631 recordMutable((StgMutClosure *)rbh);
2633 // ToDo: use size of reverted closure here!
2634 p += BLACKHOLE_sizeW();
2640 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2641 /* follow the pointer to the node which is being demanded */
2642 (StgClosure *)bf->node =
2643 evacuate((StgClosure *)bf->node);
2644 /* follow the link to the rest of the blocking queue */
2645 (StgClosure *)bf->link =
2646 evacuate((StgClosure *)bf->link);
2647 if (failed_to_evac) {
2648 failed_to_evac = rtsFalse;
2649 recordMutable((StgMutClosure *)bf);
2651 p += sizeofW(StgBlockedFetch);
2656 p += sizeofW(StgFetchMe);
2657 break; // nothing to do in this case
2659 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2661 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2662 (StgClosure *)fmbq->blocking_queue =
2663 evacuate((StgClosure *)fmbq->blocking_queue);
2664 if (failed_to_evac) {
2665 failed_to_evac = rtsFalse;
2666 recordMutable((StgMutClosure *)fmbq);
2668 p += sizeofW(StgFetchMeBlockingQueue);
2674 /* shouldn't have anything else on the mutables list */
2675 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
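
/* Editor's sketch: objects stay on (or rejoin) gen->mut_list because of
 * the generational write barrier - a mutable object in an old
 * generation that may point at younger objects must be visited at every
 * minor GC.  A minimal mutator-side illustration, using recordMutable()
 * as it is used elsewhere in this file (hypothetical helper, not RTS
 * API):
 */
#if 0
static void write_mut_var(StgMutVar *mv, StgClosure *new_value)
{
    mv->var = new_value;
    /* conservatively (re-)record the object; for objects that were
     * taken off the mutable list this is what puts them back on. */
    recordMutable((StgMutClosure *)mv);
}
#endif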
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
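
/* Editor's sketch: scavenge_static() is a worklist algorithm - the list
 * head (static_objects) can gain new entries while we drain it, because
 * evacuate() pushes newly-reached static closures onto the front.  The
 * shape of the loop with a hypothetical Work type (not RTS code):
 */
#if 0
typedef struct Work_ { struct Work_ *link; } Work;

static Work *todo_list;   /* like static_objects: grows while draining */
static Work *done_list;   /* like scavenged_static_objects */

static void drain(void (*process)(Work *))
{
    Work *w;
    while ((w = todo_list) != NULL) {
        todo_list = w->link;     /* pop *before* processing...          */
        w->link   = done_list;   /* ...and move onto the done list...   */
        done_list = w;
        process(w);              /* ...because this may push new work   */
    }
}
#endif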
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	continue;
      }

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }

      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgPtr q;
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
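
/* Editor's sketch: the liveness convention used by the bitmapped cases
 * above - bit i of the bitmap is 0 when stack word i holds a heap
 * pointer that must be evacuated, 1 when it holds a non-pointer.  The
 * small-bitmap inner loop in isolation (assumes a word-sized bitmap):
 */
#if 0
static StgPtr scav_small_bitmap(StgPtr p, StgWord bitmap)
{
    while (bitmap != 0) {
        if ((bitmap & 1) == 0) {           /* 0 bit => pointer */
            *(StgClosure **)p = evacuate(*(StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;              /* next stack word */
    }
    return p;                              /* caller walks on from here */
}
#endif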
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }

    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
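
/* Editor's sketch: the bounds arithmetic used by the MUT_ARR_PTRS cases
 * above, in isolation.  mut_arr_ptrs_sizeW() is the closure's total
 * size in words, so adding it to the closure address gives a pointer
 * one word past the last payload slot (hypothetical helper):
 */
#if 0
static void scav_ptr_array(StgMutArrPtrs *a)
{
    StgPtr q   = (P_)a->payload;                /* first pointer slot */
    StgPtr end = (P_)a + mut_arr_ptrs_sizeW(a); /* one past last slot */

    for (; q < end; q++) {
        *(StgClosure **)q = evacuate(*(StgClosure **)q);
    }
}
#endif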
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
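
/* Editor's sketch: the overloading that forces this zeroing.  An
 * IND_STATIC has a single spare word that serves as *either* the static
 * link *or* the mut_link, never both at once.  A hypothetical view of
 * the layout (not the real StgIndStatic declaration):
 */
#if 0
typedef struct {
    StgHeader   header;
    StgClosure *indirectee;
    StgClosure *static_or_mut_link; /* one word, two mutually exclusive roles */
} IndStaticView;
#endif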
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
#ifdef INTERPRETER
  StgInt i;

  /* Deal with CAFs created by compiled code. */
  for (i = 0; i < usedECafTable; i++) {
    SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
    ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
  }

  /* Deal with CAFs created by the interpreter. */
  while (ecafList != END_ECAF_LIST) {
    StgCAF* caf = ecafList;
    ecafList = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }

  /* Empty out both the table and the list. */
  usedECafTable = 0;
  ecafList = END_ECAF_LIST;
#endif
}
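
/* Editor's sketch: what reversion amounts to for a single compiled-code
 * CAF - restore the original info table and drop the cached value, so
 * the next entry re-evaluates the CAF from scratch (hypothetical
 * helper, not RTS API):
 */
#if 0
static void revert_one_caf(StgInd *caf, const StgInfoTable *orig_itbl)
{
    SET_INFO(caf, orig_itbl);  /* forget that the CAF was evaluated */
    caf->indirectee = 0;       /* drop the cached value */
}
#endif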
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif /* DEBUG */
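
/* Editor's sketch: the pp/p walk above is the classic pointer-to-pointer
 * deletion idiom - pp always addresses the field that points at p, so
 * unlinking p is a single store through pp and no 'prev' pointer is
 * needed.  In miniature, with a hypothetical Node type:
 */
#if 0
typedef struct Node_ { struct Node_ *link; int dead; } Node;

static void remove_dead(Node **head)
{
    Node **pp = head;
    Node  *p  = *head;

    while (p != NULL) {
        if (p->dead) {
            p   = p->link;
            *pp = p;           /* unlink: predecessor now skips p */
        } else {
            pp = &p->link;     /* advance: pp trails one field behind p */
            p  = *pp;
        }
    }
}
#endif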
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
	  bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}
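
/* Editor's sketch: one step of the walk above as a helper - it returns
 * rtsTrue when the walk can stop because the updatee was already a full
 * BLACKHOLE, meaning every frame below was blackholed on an earlier
 * pause (hypothetical helper, not RTS API):
 */
#if 0
static rtsBool blackhole_updatee(StgUpdateFrame *frame)
{
    StgClosure *bh = frame->updatee;

    if (bh->header.info == &stg_BLACKHOLE_info) {
        return rtsTrue;                    /* everything below is done too */
    }
    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
        bh->header.info != &stg_CAF_BLACKHOLE_info) {
        SET_INFO(bh, &stg_BLACKHOLE_info); /* overwrite the header only */
    }
    return rtsFalse;
}
#endif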
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;		        /* Temporally next */
  StgUpdateFrame *prev_frame;		        /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#ifdef DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#ifdef DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &stg_BLACKHOLE_info)
	bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      printObj((StgClosure *)prev_frame);
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &stg_BLACKHOLE_info) {
	break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
#ifdef DEBUG
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &stg_BLACKHOLE_info &&
	    bh->header.info != &stg_BLACKHOLE_BQ_info &&
	    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&stg_BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %lu\n", sp, next_frame_bottom,
		       displacement))

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
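
/* Editor's note: the word-by-word slide above moves the live words
 * between squeezed-out frames towards the stack bottom (higher
 * addresses) by 'displacement' words; copying from high addresses to
 * low is what makes the overlapping copy safe.  The equivalent bulk
 * operation would be (sketch only, assuming <string.h>; sp here is the
 * value it had before the slide loop):
 */
#if 0
memmove(next_frame_bottom + displacement,      /* destination (higher) */
	next_frame_bottom,                     /* source */
	(sp + 1 - next_frame_bottom) * sizeof(W_));
#endif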
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index