/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.88 2000/11/13 14:41:13 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects;  /* static objects scavenged so far */
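
/* A sketch (not part of the original file) of the list discipline
 * described above, over a hypothetical node type: a non-NULL sentinel
 * plays the role of END_OF_STATIC_LIST, so a NULL static link means
 * "not on any list yet" and doubles as the loop-breaking test.
 */
#if 0 /* illustrative example only */
typedef struct Obj_ {
    struct Obj_ *static_link;           /* intrusive link field */
} Obj;

#define END_OF_LIST ((Obj *)1)          /* non-NULL end-of-list marker */

static Obj *live_objs = END_OF_LIST;

static void add_static_obj(Obj *obj)
{
    if (obj->static_link == NULL) {     /* not already on a list */
        obj->static_link = live_objs;   /* cons onto the front */
        live_objs = obj;
    }
}
#endif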
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		/* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30;	/* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
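
/* A minimal sketch (not part of the original file) of the phase
 * structure described above; evacuate_roots(), scavenge_done(),
 * scavenge_todo() and flip_spaces() are hypothetical stand-ins for
 * the real machinery defined below.
 */
#if 0 /* illustrative example only */
static void gc_phases_sketch(void)
{
    evacuate_roots();             /* copy everything reachable from roots */
    while (!scavenge_done()) {    /* fixpoint: scavenging evacuates more  */
        scavenge_todo();          /* objects, which in turn need scanning */
    }
    flip_spaces();                /* from-space := to-space; free the old */
}
#endif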
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->evacuated = 0;	/* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity,
	       checkStaticObjects());
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    {
      int gen;
    loop2:
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
	for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  step = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	    scavenge(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (step->new_large_objects != NULL) {
	    scavenge_large(step);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanUpWeakPtrList below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp_bd->free;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(step->blocks);
	  step->blocks = step->to_space;
	  step->n_blocks = step->to_blocks;
	  step->to_space = NULL;
	  for (bd = step->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	step->large_objects = step->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =    oldgen_max_blocks * G
	 *                 ----------------------
	 *                      oldest_gen_no
	 */
	if (g != 0) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
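
	/* Worked example (hypothetical numbers): with 5 generations and
	 * oldest_gen->max_blocks = 1000, generation 2 would get
	 * 1000 * 2 / (5-1) = 500 blocks.
	 */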
      } else { /* for older generations... */

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = step->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &step->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	step->n_blocks += step->to_blocks;
      }
    }
  }

  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  }
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  else {
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded(); 	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
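
      /* Worked example (hypothetical numbers): with a suggestion of
       * 6000 blocks, needed = 2000 and g0s0_pcnt_kept = 30, this gives
       * (6000 - 2000) * 100 / 130 = 3076 blocks for the allocation area.
       */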
      blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /*  heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_VISUALS
  if (RtsFlags.GcFlags.visuals) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
788 /* -----------------------------------------------------------------------------
791 traverse_weak_ptr_list is called possibly many times during garbage
792 collection. It returns a flag indicating whether it did any work
793 (i.e. called evacuate on any live pointers).
795 Invariant: traverse_weak_ptr_list is called when the heap is in an
796 idempotent state. That means that there are no pending
797 evacuate/scavenge operations. This invariant helps the weak
798 pointer code decide which weak pointers are dead - if there are no
799 new live weak pointers, then all the currently unreachable ones are
802 For generational GC: we just don't try to finalize weak pointers in
803 older generations than the one we're collecting. This could
804 probably be optimised by keeping per-generation lists of weak
805 pointers, but for a few weak pointers this scheme will work.
806 -------------------------------------------------------------------------- */
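
/* A sketch (not part of the original file) of one pass of the
 * traversal described above, over a hypothetical weak-pointer record;
 * is_reachable() and keep_alive() are stand-ins for isAlive() and
 * evacuate().
 */
#if 0 /* illustrative example only */
typedef struct Weak_ {
    void *key, *value;
    struct Weak_ *link;
} Weak;

/* Promote weak pointers whose keys have become reachable.  Returns
 * non-zero if it did any work, in which case the caller must scavenge
 * again and call this once more: the newly live values may make
 * further keys reachable.
 */
static int traverse_sketch(Weak **pending, Weak **live)
{
    int progress = 0;
    Weak **prev = pending;
    for (Weak *w = *pending; w != NULL; ) {
        Weak *next = w->link;
        if (is_reachable(w->key)) {
            keep_alive(w->value);   /* value is a root iff the key lives */
            *prev = next;           /* unlink from the pending list */
            w->link = *live;        /* cons onto the live list */
            *live = w;
            progress = 1;
        } else {
            prev = &w->link;        /* dead so far: leave it pending */
        }
        w = next;
    }
    return progress;
}
#endif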
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	next = t->global_link;
	*prev = next;
	continue;
      default:
	;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

  while (1) {

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }

    info = get_itbl(p);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		/* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    case BCO:
      size = bco_sizeW((StgBCO*)p);
      goto large;

    case ARR_WORDS:
      size = arr_words_sizeW((StgArrWords *)p);
      goto large;

    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
      size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
      goto large;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }
      size = tso_sizeW((StgTSO *)p);
    large:
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	  && Bdescr((P_)p)->evacuated)
	return p;
      else
	return NULL;

    default:
      /* dead. */
      return NULL;
    }
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}
static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
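
/* A sketch (not part of the original file) of the list surgery
 * described above, over a hypothetical block descriptor with back/link
 * fields like bdescr's.
 */
#if 0 /* illustrative example only */
typedef struct Blk_ {
    struct Blk_ *back, *link;           /* doubly-linked while resident */
} Blk;

static void unlink_large(Blk **large_objects, Blk *bd)
{
    if (bd->back != NULL) {
        bd->back->link = bd->link;      /* splice out of the middle */
    } else {
        *large_objects = bd->link;      /* first object in the list */
    }
    if (bd->link != NULL) {
        bd->link->back = bd->back;
    }
}

static void push_new_large(Blk **new_large_objects, Blk *bd)
{
    bd->link = *new_large_objects;      /* singly-linked from here on */
    *new_large_objects = bd;
}
#endif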
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     then continue
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
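
/* A sketch (not part of the original file) of the destination rules
 * above, flattened to generation numbers; next_step() is a
 * hypothetical stand-in for the step->to indirection.
 */
#if 0 /* illustrative example only */
static int choose_dest_gen(int M, int N, int evac_gen)
{
    if (M > N) {            /* not being collected: object stays put;     */
        return M;           /* caller sets failed_to_evac if M < evac_gen */
    }
    if (M < evac_gen) {
        return evac_gen;    /* promote eagerly to the requested gen */
    }
    return next_step(M);    /* normal ageing via the "to" pointer */
}
#endif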
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &stg_MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  /* unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE && */
	  (StgChar)w <= MAX_CHARLIKE) {
	return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      /* else, fall through ... */
    }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,step);
  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case CAF_ENTERED:
	selectee = ((StgCAF *)selectee)->value;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,step);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,step);
      }
    }
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
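
/* A sketch (not part of the original file) of the adjust-by-word-diff
 * idiom used below, in isolation: after a block copy, every interior
 * pointer is shifted by the same (word) distance as the object itself.
 * Assumes word-sized longs for the sake of the example.
 */
#if 0 /* illustrative example only */
#include <string.h>

typedef struct Frame_ { struct Frame_ *link; } Frame;

static Frame *move_frame(Frame *src, Frame *dest)
{
    long diff = (long *)dest - (long *)src;   /* distance in words */
    memcpy(dest, src, sizeof(Frame));
    if (dest->link != NULL) {
        dest->link = (Frame *)((long *)dest->link + diff);
    }
    return dest;
}
#endif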
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   scavengeTSO
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
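
/* The loop below is a Cheney-style scan: a scan pointer chases the
 * allocation pointer through to-space, and scavenging an object can
 * push the allocation pointer further ahead.  A sketch (not part of
 * the original file) with hypothetical scan/hp globals and a
 * scavenge_obj() helper that evacuates an object's children and
 * returns its size in words:
 */
#if 0 /* illustrative example only */
static void cheney_scan(void)
{
    while (scan < hp) {               /* hp may grow while we scan */
        scan += scavenge_obj(scan);
    }
}
#endif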
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);
    switch (info -> type) {

    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	p += bco_sizeW(bco);
	break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (step->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }
    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }
    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
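
/* A sketch (not part of the original file) of the filtering idiom the
 * two list scavengers below share: walk the saved list, scavenge each
 * entry, and rebuild the list with only the entries that still need to
 * stay on it.  still_points_young() is a hypothetical test.
 */
#if 0 /* illustrative example only */
typedef struct Mut_ { struct Mut_ *mut_link; } Mut;

static Mut *filter_mut_list(Mut *old_list)
{
    Mut *new_list = NULL;
    Mut *next;
    for (Mut *p = old_list; p != NULL; p = next) {
        next = p->mut_link;
        if (still_points_young(p)) {
            p->mut_link = new_list;     /* keep: still has old-to-new ptrs */
            new_list = p;
        }                               /* else: drop it from the list */
    }
    return new_list;
}
#endif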
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	} else {
	  caf->mut_link = NULL;
	}
      }
      continue;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	} else {
	  caf->mut_link = NULL;
	}
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {
2528 case MUT_ARR_PTRS_FROZEN:
2529 /* remove this guy from the mutable list, but follow the ptrs
2530 * anyway (and make sure they get promoted to this gen).
2535 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2537 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2538 (StgClosure *)*q = evacuate((StgClosure *)*q);
2542 if (failed_to_evac) {
2543 failed_to_evac = rtsFalse;
2544 p->mut_link = gen->mut_list;
2551 /* follow everything */
2552 p->mut_link = gen->mut_list;
2557 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2558 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2559 (StgClosure *)*q = evacuate((StgClosure *)*q);
2565 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2566 * it from the mutable list if possible by promoting whatever it
2567 * points to. */
2569 ASSERT(p->header.info != &stg_MUT_CONS_info);
2570 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2571 p->mut_link = gen->mut_list;
2577 StgMVar *mvar = (StgMVar *)p;
2578 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2579 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2580 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2581 p->mut_link = gen->mut_list;
2588 StgTSO *tso = (StgTSO *)p;
2592 /* Don't take this TSO off the mutable list - it might still
2593 * point to some younger objects (because we set evac_gen to 0
2594 * above). */
2596 tso->mut_link = gen->mut_list;
2597 gen->mut_list = (StgMutClosure *)tso;
2603 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2604 (StgClosure *)bh->blocking_queue =
2605 evacuate((StgClosure *)bh->blocking_queue);
2606 p->mut_link = gen->mut_list;
2611 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2612 */
2613 case IND_OLDGEN:
2614 case IND_OLDGEN_PERM:
2615 /* Try to pull the indirectee into this generation, so we can
2616 * remove the indirection from the mutable list.
2619 ((StgIndOldGen *)p)->indirectee =
2620 evacuate(((StgIndOldGen *)p)->indirectee);
2623 if (failed_to_evac) {
2624 failed_to_evac = rtsFalse;
2625 p->mut_link = gen->mut_once_list;
2626 gen->mut_once_list = p;
2633 // HWL: check whether all of these are necessary
2635 case RBH: // cf. BLACKHOLE_BQ
2637 // nat size, ptrs, nonptrs, vhs;
2639 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2640 StgRBH *rbh = (StgRBH *)p;
2641 (StgClosure *)rbh->blocking_queue =
2642 evacuate((StgClosure *)rbh->blocking_queue);
2643 if (failed_to_evac) {
2644 failed_to_evac = rtsFalse;
2645 recordMutable((StgMutClosure *)rbh);
2647 // ToDo: use size of reverted closure here!
2648 p += BLACKHOLE_sizeW();
2654 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2655 /* follow the pointer to the node which is being demanded */
2656 (StgClosure *)bf->node =
2657 evacuate((StgClosure *)bf->node);
2658 /* follow the link to the rest of the blocking queue */
2659 (StgClosure *)bf->link =
2660 evacuate((StgClosure *)bf->link);
2661 if (failed_to_evac) {
2662 failed_to_evac = rtsFalse;
2663 recordMutable((StgMutClosure *)bf);
2665 p += sizeofW(StgBlockedFetch);
2670 p += sizeofW(StgFetchMe);
2671 break; // nothing to do in this case
2673 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2675 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2676 (StgClosure *)fmbq->blocking_queue =
2677 evacuate((StgClosure *)fmbq->blocking_queue);
2678 if (failed_to_evac) {
2679 failed_to_evac = rtsFalse;
2680 recordMutable((StgMutClosure *)fmbq);
2682 p += sizeofW(StgFetchMeBlockingQueue);
2688 /* shouldn't have anything else on the mutables list */
2689 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
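
/* Toy illustration of the remembered-set invariant that both
 * list-scavenging routines maintain -- hypothetical types, not RTS
 * code: an object living in generation g must stay on g's mutable list
 * for as long as it can still point at an object in a younger
 * generation, which is exactly the condition failed_to_evac reports
 * above.
 */
typedef struct ToyObj_ {
    int gen;                        /* generation the object lives in */
    struct ToyObj_ *field;          /* the single pointer it contains */
} ToyObj;

static int
needs_remembering(const ToyObj *o)
{
    return o->field != NULL && o->field->gen < o->gen;
}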
2694 //@cindex scavenge_static
2697 scavenge_static(void)
2699 StgClosure* p = static_objects;
2700 const StgInfoTable *info;
2702 /* Always evacuate straight to the oldest generation for static
2703 * objects */
2704 evac_gen = oldest_gen->no;
2706 /* keep going until we've scavenged all the objects on the linked
2707 * list... */
2708 while (p != END_OF_STATIC_LIST) {
2712 if (info->type==RBH)
2713 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2715 /* make sure the info pointer is into text space */
2716 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2717 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2719 /* Take this object *off* the static_objects list,
2720 * and put it on the scavenged_static_objects list.
2722 static_objects = STATIC_LINK(info,p);
2723 STATIC_LINK(info,p) = scavenged_static_objects;
2724 scavenged_static_objects = p;
2726 switch (info -> type) {
2730 StgInd *ind = (StgInd *)p;
2731 ind->indirectee = evacuate(ind->indirectee);
2733 /* might fail to evacuate it, in which case we have to pop it
2734 * back on the mutable list (and take it off the
2735 * scavenged_static list because the static link and mut link
2736 * pointers are one and the same).
2738 if (failed_to_evac) {
2739 failed_to_evac = rtsFalse;
2740 scavenged_static_objects = STATIC_LINK(info,p);
2741 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2742 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2756 next = (P_)p->payload + info->layout.payload.ptrs;
2757 /* evacuate the pointers */
2758 for (q = (P_)p->payload; q < next; q++) {
2759 (StgClosure *)*q = evacuate((StgClosure *)*q);
2765 barf("scavenge_static: strange closure %d", (int)(info->type));
2768 ASSERT(failed_to_evac == rtsFalse);
2770 /* get the next static object from the list. Remember, there might
2771 * be more stuff on this list now that we've done some evacuating!
2772 * (static_objects is a global)
2773 */
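
/* A stand-alone sketch of the work-list discipline scavenge_static()
 * uses (and which scavenge_large() below reuses for large objects):
 * pop from one stack, push onto a "done" stack, and keep looping
 * because processing an item may cons new work onto the first stack.
 * ToyStatic, todo_list and done_list are hypothetical stand-ins for
 * the real static_objects / scavenged_static_objects globals.
 */
typedef struct ToyStatic_ {
    struct ToyStatic_ *static_link;
} ToyStatic;

static ToyStatic *todo_list;        /* models static_objects */
static ToyStatic *done_list;        /* models scavenged_static_objects */

static void
drain_static_worklist(void (*scan)(ToyStatic *))
{
    while (todo_list != NULL) {     /* the RTS tests END_OF_STATIC_LIST */
        ToyStatic *p = todo_list;
        todo_list = p->static_link; /* take it *off* the todo list ... */
        p->static_link = done_list; /* ... and push it on the done list */
        done_list = p;
        scan(p);                    /* may push more work onto todo_list */
    }
}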
2778 /* -----------------------------------------------------------------------------
2779 scavenge_stack walks over a section of stack and evacuates all the
2780 objects pointed to by it. We can use the same code for walking
2781 PAPs, since these are just sections of copied stack.
2782 -------------------------------------------------------------------------- */
2783 //@cindex scavenge_stack
2786 scavenge_stack(StgPtr p, StgPtr stack_end)
2789 const StgInfoTable* info;
2792 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
2795 * Each time around this loop, we are looking at a chunk of stack
2796 * that starts with either a pending argument section or an
2797 * activation record.
2800 while (p < stack_end) {
2803 /* If we've got a tag, skip over that many words on the stack */
2804 if (IS_ARG_TAG((W_)q)) {
2809 /* Is q a pointer to a closure?
2811 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2813 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2814 ASSERT(closure_STATIC((StgClosure *)q));
2816 /* otherwise, must be a pointer into the allocation space. */
2819 (StgClosure *)*p = evacuate((StgClosure *)q);
2825 * Otherwise, q must be the info pointer of an activation
2826 * record. All activation records have 'bitmap' style layout
2829 info = get_itbl((StgClosure *)p);
2831 switch (info->type) {
2833 /* Dynamic bitmap: the mask is stored on the stack */
2835 bitmap = ((StgRetDyn *)p)->liveness;
2836 p = (P_)&((StgRetDyn *)p)->payload[0];
2839 /* probably a slow-entry point return address: */
2847 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
2848 old_p, p, old_p+1));
2850 p++; /* what if FHS!=1 !? -- HWL */
2855 /* Specialised code for update frames, since they're so common.
2856 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2857 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2861 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2863 nat type = get_itbl(frame->updatee)->type;
2865 p += sizeofW(StgUpdateFrame);
2866 if (type == EVACUATED) {
2867 frame->updatee = evacuate(frame->updatee);
2870 bdescr *bd = Bdescr((P_)frame->updatee);
2872 if (bd->gen->no > N) {
2873 if (bd->gen->no < evac_gen) {
2874 failed_to_evac = rtsTrue;
2879 /* Don't promote blackholes */
2881 if (!(step->gen->no == 0 &&
2883 step->no == step->gen->n_steps-1)) {
2890 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2891 sizeofW(StgHeader), step);
2892 frame->updatee = to;
2895 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2896 frame->updatee = to;
2897 recordMutable((StgMutClosure *)to);
2900 /* will never be SE_{,CAF_}BLACKHOLE, since we
2901 don't push an update frame for single-entry thunks. KSW 1999-01. */
2902 barf("scavenge_stack: UPDATE_FRAME updatee");
2907 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2914 bitmap = info->layout.bitmap;
2916 /* this assumes that the payload starts immediately after the info-ptr */
2918 while (bitmap != 0) {
2919 if ((bitmap & 1) == 0) {
2920 (StgClosure *)*p = evacuate((StgClosure *)*p);
2923 bitmap = bitmap >> 1;
2930 /* large bitmap (> 32 entries, or 64 on a 64-bit machine) */
2935 StgLargeBitmap *large_bitmap;
2938 large_bitmap = info->layout.large_bitmap;
2941 for (i=0; i<large_bitmap->size; i++) {
2942 bitmap = large_bitmap->bitmap[i];
2943 q = p + sizeof(W_) * 8;
2944 while (bitmap != 0) {
2945 if ((bitmap & 1) == 0) {
2946 (StgClosure *)*p = evacuate((StgClosure *)*p);
2949 bitmap = bitmap >> 1;
2951 if (i+1 < large_bitmap->size) {
2953 (StgClosure *)*p = evacuate((StgClosure *)*p);
2959 /* and don't forget to follow the SRT */
2964 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
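
/* A toy version of the small-bitmap loop used above -- hypothetical,
 * stand-alone code: bit i of the liveness mask describes stack word i,
 * a 0 bit marks a heap pointer (follow it), a 1 bit marks a
 * non-pointer word (skip it).  `redirect` stands in for evacuate(),
 * which returns the object's new address.
 */
typedef unsigned long ToyWord;

static void
walk_small_bitmap(void **slot, ToyWord bitmap, void *(*redirect)(void *))
{
    for (; bitmap != 0; bitmap >>= 1, slot++) {
        if ((bitmap & 1) == 0) {
            *slot = redirect(*slot);    /* pointer word: evacuate it */
        }
    }
}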
2969 /*-----------------------------------------------------------------------------
2970 scavenge the large object list.
2972 evac_gen set by caller; similar games played with evac_gen as with
2973 scavenge() - see comment at the top of scavenge(). Most large
2974 objects are (repeatedly) mutable, so most of the time evac_gen will
2975 be zero.
2976 --------------------------------------------------------------------------- */
2977 //@cindex scavenge_large
2980 scavenge_large(step *step)
2984 const StgInfoTable* info;
2985 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2987 evac_gen = 0; /* most objects are mutable */
2988 bd = step->new_large_objects;
2990 for (; bd != NULL; bd = step->new_large_objects) {
2992 /* take this object *off* the large objects list and put it on
2993 * the scavenged large objects list. This is so that we can
2994 * treat new_large_objects as a stack and push new objects on
2995 * the front when evacuating.
2997 step->new_large_objects = bd->link;
2998 dbl_link_onto(bd, &step->scavenged_large_objects);
3001 info = get_itbl((StgClosure *)p);
3003 switch (info->type) {
3005 /* only certain objects can be "large"... */
3008 /* nothing to follow */
3012 /* follow everything */
3016 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3017 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3018 (StgClosure *)*p = evacuate((StgClosure *)*p);
3023 case MUT_ARR_PTRS_FROZEN:
3024 /* follow everything */
3026 StgPtr start = p, next;
3028 evac_gen = saved_evac_gen; /* not really mutable */
3029 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3030 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3031 (StgClosure *)*p = evacuate((StgClosure *)*p);
3034 if (failed_to_evac) {
3035 recordMutable((StgMutClosure *)start);
3042 StgBCO* bco = (StgBCO *)p;
3044 evac_gen = saved_evac_gen;
3045 for (i = 0; i < bco->n_ptrs; i++) {
3046 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
3053 scavengeTSO((StgTSO *)p);
3059 StgPAP* pap = (StgPAP *)p;
3061 evac_gen = saved_evac_gen; /* not really mutable */
3062 pap->fun = evacuate(pap->fun);
3063 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
3069 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
3074 //@cindex zero_static_object_list
3077 zero_static_object_list(StgClosure* first_static)
3081 const StgInfoTable *info;
3083 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3084 info = get_itbl(p);
3085 link = STATIC_LINK(info, p);
3086 STATIC_LINK(info,p) = NULL;
3087 }
3088 }
3090 /* This function is only needed because we share the mutable link
3091 * field with the static link field in an IND_STATIC, so we have to
3092 * zero the mut_link field before doing a major GC, which needs the
3093 * static link field.
3095 * It doesn't do any harm to zero all the mutable link fields on the
3096 * mutable list.
3097 */
3098 //@cindex zero_mutable_list
3101 zero_mutable_list( StgMutClosure *first )
3102 {
3103 StgMutClosure *next, *c;
3105 for (c = first; c != END_MUT_LIST; c = next) {
3106 next = c->mut_link;
3107 c->mut_link = NULL;
3108 }
3109 }
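
/* Toy rendering of why the zeroing above is needed -- hypothetical
 * types, not the real StgIndStatic layout: the same physical word
 * plays two roles at different times, so a stale mutable-list link
 * would be misread as "already on the static list" during the next
 * major GC (the collector tests the static link against NULL, as
 * gcCAFs() below shows).
 */
typedef struct ToyIndStatic_ {
    union {                         /* one field, two roles, never both */
        struct ToyIndStatic_ *mut_link;     /* between GCs: mutable list */
        struct ToyIndStatic_ *static_link;  /* during major GC: static list */
    } link;
} ToyIndStatic;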
3111 //@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
3112 //@subsection Reverting CAFs
3114 /* -----------------------------------------------------------------------------
3115 Reverting CAFs
3116 -------------------------------------------------------------------------- */
3117 //@cindex RevertCAFs
3119 void RevertCAFs(void)
3124 /* Deal with CAFs created by compiled code. */
3125 for (i = 0; i < usedECafTable; i++) {
3126 SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
3127 ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
3130 /* Deal with CAFs created by the interpreter. */
3131 while (ecafList != END_ECAF_LIST) {
3132 StgCAF* caf = ecafList;
3133 ecafList = caf->link;
3134 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
3135 SET_INFO(caf,&CAF_UNENTERED_info);
3136 caf->value = (StgClosure *)0xdeadbeef;
3137 caf->link = (StgCAF *)0xdeadbeef;
3140 /* Empty out both the table and the list. */
3142 ecafList = END_ECAF_LIST;
3146 //@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
3147 //@subsection Sanity code for CAF garbage collection
3149 /* -----------------------------------------------------------------------------
3150 Sanity code for CAF garbage collection.
3152 With DEBUG turned on, we manage a CAF list in addition to the SRT
3153 mechanism. After GC, we run down the CAF list and blackhole any
3154 CAFs which have been garbage collected. This means we get an error
3155 whenever the program tries to enter a garbage collected CAF.
3157 Any garbage collected CAFs are taken off the CAF list at the same
3158 time.
3159 -------------------------------------------------------------------------- */
3169 const StgInfoTable *info;
3180 ASSERT(info->type == IND_STATIC);
3182 if (STATIC_LINK(info,p) == NULL) {
3183 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
3185 SET_INFO(p,&stg_BLACKHOLE_info);
3186 p = STATIC_LINK2(info,p);
3190 pp = &STATIC_LINK2(info,p);
3197 /* fprintf(stderr, "%d CAFs live\n", i); */
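
/* A stand-alone sketch of the unlink-in-place idiom gcCAFs() uses --
 * hypothetical names: keeping a pointer to the *previous link field*
 * (pp) lets us splice a dead node out of a singly-linked list without
 * tracking a separate "prev" node.
 */
typedef struct ToyCAF_ {
    struct ToyCAF_ *link;
    int dead;                       /* models STATIC_LINK(info,p) == NULL */
} ToyCAF;

static void
sweep_cafs(ToyCAF **list)
{
    ToyCAF **pp = list;
    ToyCAF *p;
    while ((p = *pp) != NULL) {
        if (p->dead) {
            *pp = p->link;          /* splice the dead CAF out */
        } else {
            pp = &p->link;          /* advance to the next link field */
        }
    }
}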
3201 //@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
3202 //@subsection Lazy black holing
3204 /* -----------------------------------------------------------------------------
3205 Lazy black holing
3207 Whenever a thread returns to the scheduler after possibly doing
3208 some work, we have to run down the stack and black-hole all the
3209 closures referred to by update frames.
3210 -------------------------------------------------------------------------- */
3211 //@cindex threadLazyBlackHole
3214 threadLazyBlackHole(StgTSO *tso)
3216 StgUpdateFrame *update_frame;
3217 StgBlockingQueue *bh;
3220 stack_end = &tso->stack[tso->stack_size];
3221 update_frame = tso->su;
3224 switch (get_itbl(update_frame)->type) {
3226 case CATCH_FRAME:
3227 update_frame = ((StgCatchFrame *)update_frame)->link;
3228 break;
3230 case UPDATE_FRAME:
3231 bh = (StgBlockingQueue *)update_frame->updatee;
3233 /* if the thunk is already blackholed, it means we've also
3234 * already blackholed the rest of the thunks on this stack,
3235 * so we can stop early.
3237 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3238 * don't interfere with this optimisation.
3240 if (bh->header.info == &stg_BLACKHOLE_info) {
3244 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3245 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3246 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3247 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3249 SET_INFO(bh,&stg_BLACKHOLE_info);
3252 update_frame = update_frame->link;
3255 case SEQ_FRAME:
3256 update_frame = ((StgSeqFrame *)update_frame)->link;
3257 break;
3261 default:
3262 barf("threadLazyBlackHole: strange update frame");
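
/* Toy model of the early-exit invariant exploited above --
 * hypothetical types: update frames are black-holed from the top of
 * the stack downwards, so the first frame whose updatee is already
 * black-holed proves that every older frame below it was dealt with on
 * a previous pause, and the walk can stop there.
 */
typedef struct ToyThunk_ { int blackholed; } ToyThunk;

typedef struct ToyFrame_ {
    struct ToyFrame_ *link;         /* next (older) update frame */
    ToyThunk *updatee;
} ToyFrame;

static void
lazy_blackhole(ToyFrame *frame)
{
    for (; frame != NULL; frame = frame->link) {
        if (frame->updatee->blackholed)
            break;                  /* everything below is already done */
        frame->updatee->blackholed = 1;
    }
}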
3267 //@node Stack squeezing, Pausing a thread, Lazy black holing
3268 //@subsection Stack squeezing
3270 /* -----------------------------------------------------------------------------
3271 * Stack squeezing
3272 *
3273 * Code largely pinched from old RTS, then hacked to bits. We also do
3274 * lazy black holing here.
3276 * -------------------------------------------------------------------------- */
3277 //@cindex threadSqueezeStack
3280 threadSqueezeStack(StgTSO *tso)
3282 lnat displacement = 0;
3283 StgUpdateFrame *frame;
3284 StgUpdateFrame *next_frame; /* Temporally next */
3285 StgUpdateFrame *prev_frame; /* Temporally previous */
3287 rtsBool prev_was_update_frame;
3289 StgUpdateFrame *top_frame;
3290 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3291 bhs=0, squeezes=0;
3292 void printObj( StgClosure *obj ); // from Printer.c
3294 top_frame = tso->su;
3297 bottom = &(tso->stack[tso->stack_size]);
3300 /* There must be at least one frame, namely the STOP_FRAME.
3302 ASSERT((P_)frame < bottom);
3304 /* Walk down the stack, reversing the links between frames so that
3305 * we can walk back up as we squeeze from the bottom. Note that
3306 * next_frame and prev_frame refer to next and previous as they were
3307 * added to the stack, rather than the way we see them in this
3308 * walk. (It makes the next loop less confusing.)
3310 * Stop if we find an update frame pointing to a black hole
3311 * (see comment in threadLazyBlackHole()).
3315 /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
3316 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3317 prev_frame = frame->link;
3318 frame->link = next_frame;
3319 next_frame = frame;
3320 frame = prev_frame;
3323 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3324 printObj((StgClosure *)prev_frame);
3325 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3326 frame, prev_frame);
3327 }
3328 switch (get_itbl(frame)->type) {
3329 case UPDATE_FRAME: upd_frames++;
3330 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3333 case STOP_FRAME: stop_frames++;
3335 case CATCH_FRAME: catch_frames++;
3337 case SEQ_FRAME: seq_frames++;
3340 printObj((StgClosure *)prev_frame);
3341 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3342 frame, prev_frame);
3345 if (get_itbl(frame)->type == UPDATE_FRAME
3346 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3351 /* Now, we're at the bottom. Frame points to the lowest update
3352 * frame on the stack, and its link actually points to the frame
3353 * above. We have to walk back up the stack, squeezing out empty
3354 * update frames and turning the pointers back around on the way
3357 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3358 * we never want to eliminate it anyway. Just walk one step up
3359 * before starting to squeeze. When you get to the topmost frame,
3360 * remember that there are still some words above it that might have
3361 * to be moved.
3362 */
3367 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3370 * Loop through all of the frames (everything except the very
3371 * bottom). Things are complicated by the fact that we have
3372 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3373 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3375 while (frame != NULL) {
3377 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3378 rtsBool is_update_frame;
3380 next_frame = frame->link;
3381 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3383 /* Check to see if:
3384 * 1. both the previous and current frame are update frames
3385 * 2. the current frame is empty
3386 */
3387 if (prev_was_update_frame && is_update_frame &&
3388 (P_)prev_frame == frame_bottom + displacement) {
3390 /* Now squeeze out the current frame */
3391 StgClosure *updatee_keep = prev_frame->updatee;
3392 StgClosure *updatee_bypass = frame->updatee;
3395 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3399 /* Deal with blocking queues. If both updatees have blocked
3400 * threads, then we should merge the queues into the update
3401 * frame that we're keeping.
3403 * Alternatively, we could just wake them up: they'll just go
3404 * straight to sleep on the proper blackhole! This is less code
3405 * and probably less bug prone, although it's probably much
3406 * slower.
3407 */
3408 #if 0 /* do it properly... */
3409 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3410 # error Unimplemented lazy BH warning. (KSW 1999-01)
3412 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3413 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3415 /* Sigh. It has one. Don't lose those threads! */
3416 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3417 /* Urgh. Two queues. Merge them. */
3418 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3420 while (keep_tso->link != END_TSO_QUEUE) {
3421 keep_tso = keep_tso->link;
3423 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3426 /* For simplicity, just swap the BQ for the BH */
3427 P_ temp = updatee_keep;
3429 updatee_keep = updatee_bypass;
3430 updatee_bypass = temp;
3432 /* Record the swap in the kept frame (below) */
3433 prev_frame->updatee = updatee_keep;
3438 TICK_UPD_SQUEEZED();
3439 /* wasn't there something about update squeezing and ticky to be
3440 * sorted out? oh yes: we aren't counting each enter properly
3441 * in this case. See the log somewhere. KSW 1999-04-21
3443 UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
3445 sp = (P_)frame - 1; /* sp = stuff to slide */
3446 displacement += sizeofW(StgUpdateFrame);
3449 /* No squeeze for this frame */
3450 sp = frame_bottom - 1; /* Keep the current frame */
3452 /* Do lazy black-holing.
3454 if (is_update_frame) {
3455 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3456 if (bh->header.info != &stg_BLACKHOLE_info &&
3457 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3458 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3459 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3460 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3462 SET_INFO(bh,&stg_BLACKHOLE_info);
3466 /* Fix the link in the current frame (should point to the frame below) */
3467 frame->link = prev_frame;
3468 prev_was_update_frame = is_update_frame;
3471 /* Now slide all words from sp up to the next frame */
3473 if (displacement > 0) {
3474 P_ next_frame_bottom;
3476 if (next_frame != NULL)
3477 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3479 next_frame_bottom = tso->sp - 1;
3483 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
3484 displacement));
3487 while (sp >= next_frame_bottom) {
3488 sp[displacement] = *sp;
3489 sp--;
3490 }
3492 (P_)prev_frame = (P_)frame + displacement;
3496 tso->sp += displacement;
3497 tso->su = prev_frame;
3500 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames\n",
3501 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
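
/* A toy version of the slide loop above -- hypothetical, stand-alone:
 * after squeezing out `displacement` words of update frames, every
 * live word between two frames is copied upwards (towards higher
 * addresses; the stack grows downwards) by that amount.  Copying from
 * the highest word first makes the overlapping copy safe, since each
 * write lands strictly above every remaining read.
 */
static void
slide_words(unsigned long *sp, unsigned long *limit,
            unsigned long displacement)
{
    for (; sp >= limit; sp--) {
        sp[displacement] = *sp;     /* move one word up the stack */
    }
}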
3505 //@node Pausing a thread, Index, Stack squeezing
3506 //@subsection Pausing a thread
3508 /* -----------------------------------------------------------------------------
3509 * Pausing a thread
3510 *
3511 * We have to prepare for GC - this means doing lazy black holing
3512 * here. We also take the opportunity to do stack squeezing if it's
3513 * turned on.
3514 * -------------------------------------------------------------------------- */
3515 //@cindex threadPaused
3517 threadPaused(StgTSO *tso)
3519 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3520 threadSqueezeStack(tso); /* does black holing too */
3521 else
3522 threadLazyBlackHole(tso);
3525 /* -----------------------------------------------------------------------------
3526 * Debugging
3527 * -------------------------------------------------------------------------- */
3530 //@cindex printMutOnceList
3532 printMutOnceList(generation *gen)
3534 StgMutClosure *p, *next;
3536 p = gen->mut_once_list;
3539 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3540 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3541 fprintf(stderr, "%p (%s), ",
3542 p, info_type((StgClosure *)p));
3544 fputc('\n', stderr);
3547 //@cindex printMutableList
3549 printMutableList(generation *gen)
3551 StgMutClosure *p, *next;
3553 p = gen->mut_list;
3556 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3557 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3558 fprintf(stderr, "%p (%s), ",
3559 p, info_type((StgClosure *)p));
3561 fputc('\n', stderr);
3564 //@cindex maybeLarge
3565 static inline rtsBool
3566 maybeLarge(StgClosure *closure)
3568 StgInfoTable *info = get_itbl(closure);
3570 /* closure types that may be found on the new_large_objects list;
3571 see scavenge_large */
3572 return (info->type == MUT_ARR_PTRS ||
3573 info->type == MUT_ARR_PTRS_FROZEN ||
3574 info->type == TSO ||
3575 info->type == ARR_WORDS ||
3576 info->type == BCO);
3577 }
3582 //@node Index, , Pausing a thread
3586 //* GarbageCollect:: @cindex\s-+GarbageCollect
3587 //* MarkRoot:: @cindex\s-+MarkRoot
3588 //* RevertCAFs:: @cindex\s-+RevertCAFs
3589 //* addBlock:: @cindex\s-+addBlock
3590 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3591 //* copy:: @cindex\s-+copy
3592 //* copyPart:: @cindex\s-+copyPart
3593 //* evacuate:: @cindex\s-+evacuate
3594 //* evacuate_large:: @cindex\s-+evacuate_large
3595 //* gcCAFs:: @cindex\s-+gcCAFs
3596 //* isAlive:: @cindex\s-+isAlive
3597 //* maybeLarge:: @cindex\s-+maybeLarge
3598 //* mkMutCons:: @cindex\s-+mkMutCons
3599 //* printMutOnceList:: @cindex\s-+printMutOnceList
3600 //* printMutableList:: @cindex\s-+printMutableList
3601 //* relocate_TSO:: @cindex\s-+relocate_TSO
3602 //* scavenge:: @cindex\s-+scavenge
3603 //* scavenge_large:: @cindex\s-+scavenge_large
3604 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3605 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3606 //* scavenge_one:: @cindex\s-+scavenge_one
3607 //* scavenge_srt:: @cindex\s-+scavenge_srt
3608 //* scavenge_stack:: @cindex\s-+scavenge_stack
3609 //* scavenge_static:: @cindex\s-+scavenge_static
3610 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3611 //* threadPaused:: @cindex\s-+threadPaused
3612 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3613 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3614 //* upd_evacuee:: @cindex\s-+upd_evacuee
3615 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
3616 //* zero_static_object_list:: @cindex\s-+zero_static_object_list