/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.103 2001/07/23 10:47:16 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
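
/* For illustration (a sketch based on the code later in this file, not
 * an extra mechanism): a static object is pushed onto the live list
 * through its own static link field, with the non-NULL sentinel marking
 * list membership:
 *
 *   if (STATIC_LINK(info,(StgClosure *)q) == NULL) {  // not yet on any list
 *     STATIC_LINK(info,(StgClosure *)q) = static_objects;
 *     static_objects = (StgClosure *)q;               // cons onto the front
 *   }
 *
 * scavenge_static() then pops objects off static_objects and moves them
 * to scavenged_static_objects, so that the marks can be zeroed afterwards
 * by zero_static_object_list().
 */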
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	  /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
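
/* For illustration, here is the shape of the guard built from these two
 * definitions (the real check lives in evacuate()'s THUNK_SELECTOR case
 * below; this is just a sketch of it):
 *
 *   if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
 *     thunk_selector_depth++;
 *     selectee = evacuate(selectee);   // may recurse through more selectors
 *     thunk_selector_depth--;
 *   }
 *
 * Past the limit we stop chasing the selector chain and simply copy the
 * selector thunk, trading a little space for bounded recursion depth.
 */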
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step * );
static void         scavenge                ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

void revertCAFs   ( void );
void scavengeCAFs ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
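
/* In outline, the phases above amount to the following (an illustrative
 * sketch only; the real code below interleaves steps, generations, weak
 * pointers and large objects):
 *
 *   get_roots();                        // evacuate everything the roots reach
 *   do {
 *     flag = rtsFalse;
 *     ... scavenge all pending to-space areas ...     // may evacuate more
 *   } while (flag || traverse_weak_ptr_list());
 *   ... free from-space, then from-space = to-space ...
 */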
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  rtsBool flag;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  /* tell the stats department that we've started a GC */

  /* Init stats and print par specific (timing) info */
  PAR_TICKY_PAR_START();

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->evacuated = 1;	/* it's a to-space block */
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_space = bd;
      stp->to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(); see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen_no = g;
        bd->step = stp;
        bd->evacuated = 0;	/* *not* a to-space block */
        stp->hp = bd->start;
        stp->hpLim = stp->hp + BLOCK_SIZE_W;
        stp->hp_bd = bd;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_space = NULL;
      stp->to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
  /* Mark all entries on the list of pending fetches */
  markPendingFetches(major_gc);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);
  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   */
  extern void markHugsObjects(void);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
	     checkStaticObjects());
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  {
    int gen;
  loop2:
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
        stp = &generations[gen].steps[st];
        evac_gen = gen;
        if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
          scavenge(stp);
          flag = rtsTrue;
          goto loop2;
        }
        if (stp->new_large_objects != NULL) {
          scavenge_large(stp);
          flag = rtsTrue;
          goto loop2;
        }
      }
    }
  }
  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    bdescr *next;

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {

      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(stp->blocks);
	  stp->blocks = stp->to_space;
	  stp->n_blocks = stp->to_blocks;
	  stp->to_space = NULL;
	  stp->to_blocks = 0;
	  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	stp->large_objects = stp->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =    oldgen_max_blocks * G
	 *                 ----------------------
	 *                 RtsFlags.GcFlags.generations - 1
	 */
	if (g != 0) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	    / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}

      /* for older generations... */
      } else {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	stp->n_blocks += stp->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
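
    /* A worked example of the adjustment below, with illustrative numbers
     * (not from the original source): suppose maxHeapSize = 1000 blocks,
     * oldGenFactor = 2 and live data L = 300 blocks.  The unadjusted
     * nursery would be L * oldGenFactor = 600 blocks, and since
     * 300 * 2 * 2 = 1200 > 1000 we are near the limit.  We instead take
     * adjusted_blocks = 1000 - 2*300 = 400, giving a total footprint of
     * roughly 2L + nursery = 1000 blocks, i.e. the 2L regime described
     * above.
     */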
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  }
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {
    int blocks;
    nat needed = calcNeeded(); 	/* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    if (N == 0) {
      g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
    }
    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);
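
    /* A sketch of where that formula comes from (reasoning added here,
     * not in the original source): at the next GC we expect to need
     *
     *    blocks                        (the allocation area itself)
     *  + blocks * g0s0_pcnt_kept/100   (the part of g0s0 that survives)
     *  + needed                        (all the other steps)
     *
     * and we want the total to match the suggestion, so
     *
     *    blocks = (suggested - needed) / (1 + g0s0_pcnt_kept/100)
     *
     * which is the expression above after multiplying top and bottom
     * by 100 to stay in integer arithmetic.
     */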
    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }

    resizeNursery((nat)blocks);
  }

  /* mark the garbage collected CAFs as dead */
#if 0 /* doesn't work at the moment */
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
//@cindex traverse_weak_ptr_list
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->global_link;
	*prev = next;
	continue;
      default:
	break;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen_no > N) {
    return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    return ((StgEvacuated *)p)->evacuee;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    size = tso_sizeW((StgTSO *)p);

  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	&& Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    return NULL;
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}
static void addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;

  stp->to_blocks++;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    stp = bd->step->to;
  }
  else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case MUT_VAR:
    ASSERT(q->header.info != &stg_MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  /* unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE && */
	  (StgChar)w <= MAX_CHARLIKE) {
	return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      /* else, fall through ... */
    }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	/* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	   something) to go into an infinite loop when the nightly
	   stage2 compiles PrelTup.lhs. */

	/* we can't recurse indefinitely in evacuate(), so set a
	 * limit on the number of times we can go around this
	 * loop.
	 */
	if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	  bdescr *bd;
	  bd = Bdescr((P_)selectee);
	  if (!bd->evacuated) {
	    thunk_selector_depth++;
	    selectee = evacuate(selectee);
	    thunk_selector_depth--;
	    goto selector_loop;
	  }
	}
	/* otherwise, fall through... */
#endif

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

	/* a copy of the top-level cases below */
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,stp);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,stp);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,stp);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }
    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");
    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }
    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }

    case EVACUATED:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    /*
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
	StgPtr end, q;

	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	evac_gen = gen->no;
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	evac_gen = 0;

	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = gen->mut_list;
	  gen->mut_list = p;
	}
	continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
	StgPtr end, q;

	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &stg_MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
	StgMVar *mvar = (StgMVar *)p;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }

      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;

#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// char str[80];
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }

#ifdef DIST
    case REMOTE_REF:
      barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
#endif
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
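
/* For orientation (a sketch, not the RTS definition): recordMutable(),
 * called in several cases above, is the other half of this scheme --
 * it pushes a closure onto the mutable list of the generation that the
 * closure lives in, roughly like this.  Guarded by #if 0; the real
 * definition lives elsewhere in the RTS.
 */
#if 0
void
recordMutable(StgMutClosure *p)
{
    bdescr *bd = Bdescr((P_)p);		   /* block descriptor -> generation */
    generation *gen = &generations[bd->gen_no];
    p->mut_link = gen->mut_list;	   /* cons onto that gen's list */
    gen->mut_list = p;
}
#endif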
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    /*
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
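
/* Illustrative sketch (not part of the RTS): the loop above is a
 * worklist algorithm.  Because evacuate() may push newly-reachable
 * static closures onto the global static_objects list while we scan,
 * the loop re-reads the list head after processing each object rather
 * than caching a next pointer.  The shape of the algorithm, with
 * hypothetical work/process names, is sketched below under #if 0.
 */
#if 0
typedef struct work_ { struct work_ *link; } work;

/* process() may prepend new items to *list_head while it runs. */
extern void process(work *item, work **list_head);

static void
drain(work **list_head)
{
    work *item;

    while ((item = *list_head) != NULL) {
	*list_head = item->link;	/* pop one item...           */
	process(item, list_head);	/* ...which may push more... */
	/* ...and re-read *list_head on the next iteration. */
    }
}
#endif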
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *stp;
	  if (bd->gen_no > N) {
	    if (bd->gen_no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  stp = bd->step;
	  if (!(stp->gen_no == 0 &&
		stp->no != 0 &&
		stp->no == stp->gen->n_steps-1)) {
	    stp = stp->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), stp);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }

      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgPtr q;
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
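
/* Worked example (not part of the RTS): in the small-bitmap case above,
 * bit i of the liveness mask describes stack word p[i]; a 0 bit means
 * "pointer, evacuate it" and a 1 bit means "live non-pointer, skip it".
 * So for a hypothetical frame whose bitmap is 0x5 (binary 101):
 *
 *     p[0]  bit 0 = 1  ->  non-pointer, skipped
 *     p[1]  bit 1 = 0  ->  (StgClosure *)p[1] is evacuated
 *     p[2]  bit 2 = 1  ->  non-pointer, skipped
 *
 * after which the shifted bitmap is zero and the walk stops, leaving p
 * just past the frame's payload.
 */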
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = stp->new_large_objects;

  for (; bd != NULL; bd = stp->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    stp->new_large_objects = bd->link;
    dbl_link_onto(bd, &stp->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
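
/* For orientation (a sketch, not the RTS definition): dbl_link_onto(),
 * used above to move a block descriptor onto the scavenged list, just
 * pushes bd onto the front of a doubly-linked list of bdescrs, roughly
 * as follows.  Guarded by #if 0; the real definition lives earlier in
 * this file.
 */
#if 0
static inline void
dbl_link_onto(bdescr *bd, bdescr **list)
{
  bd->link = *list;
  bd->back = NULL;
  if (*list != NULL) {
    (*list)->back = bd;		/* double-link the list */
  }
  *list = bd;
}
#endif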
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs( void )
{
  StgIndStatic *c;

  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->header.info = c->saved_info;
    c->saved_info = NULL;
    /* could, but not necessary: c->static_link = NULL; */
  }
  caf_list = NULL;
}

void scavengeCAFs( void )
{
  StgIndStatic *c;

  evac_gen = 0;
  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->indirectee = evacuate(c->indirectee);
  }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }

  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif /* DEBUG */
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
	  bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadLazyBlackHole");
    }
  }
}
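
/* Illustrative sketch (not part of the RTS): this function and
 * threadSqueezeStack() below both rely on the update, catch and seq
 * frames on a TSO's stack forming a singly-linked chain, rooted at
 * tso->su and threaded through each frame's link field down to the
 * STOP_FRAME.  A minimal walk of that chain -- here a hypothetical
 * helper that counts update frames -- looks like this (#if 0, never
 * compiled):
 */
#if 0
static nat
countUpdateFrames(StgTSO *tso)
{
  StgUpdateFrame *frame;
  nat n = 0;

  for (frame = tso->su; ; ) {
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      n++;
      frame = frame->link;
      break;
    case CATCH_FRAME:
      frame = ((StgCatchFrame *)frame)->link;
      break;
    case SEQ_FRAME:
      frame = ((StgSeqFrame *)frame)->link;
      break;
    case STOP_FRAME:
      return n;			/* bottom of the chain */
    default:
      barf("countUpdateFrames: strange frame");
    }
  }
}
#endif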
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;			/* Temporally next */
  StgUpdateFrame *prev_frame;			/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame  = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &stg_BLACKHOLE_info)
	bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &stg_BLACKHOLE_info) {
      break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       *
       * Check two things: that the two update frames don't point to
       * the same object, and that the updatee_bypass isn't already an
       * indirection.  Both of these cases only happen when we're in a
       * black hole-style loop (and there are multiple update frames
       * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
      if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
	/* this wakes the threads up */
	UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
      }

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &stg_BLACKHOLE_info &&
	    bh->header.info != &stg_BLACKHOLE_BQ_info &&
	    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
	  /* zero out the slop so that the sanity checker can tell
	   * where the next closure is.
	   */
	  {
	    StgInfoTable *info = get_itbl(bh);
	    nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
	    /* don't zero out slop for a THUNK_SELECTOR, because its layout
	     * info is used for a different purpose, and it's exactly the
	     * same size as a BLACKHOLE in any case.
	     */
	    if (info->type != THUNK_SELECTOR) {
	      for (i = np; i < np + nw; i++) {
		((StgClosure *)bh)->payload[i] = 0;
	      }
	    }
	  }
#endif
	  SET_INFO(bh,&stg_BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement))

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
#if DEBUG
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}
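
/* Worked example (not part of the RTS): suppose two update frames sit
 * directly on top of one another, with updatees u1 (kept, nearer the
 * stack bottom) and u2 (bypassed):
 *
 *     ... stuff ... | frame(u2) | frame(u1) | ... stack bottom ...
 *
 * The squeeze turns u2 into an indirection to u1 (UPD_IND_NOLOCK),
 * drops frame(u2), and adds sizeofW(StgUpdateFrame) to 'displacement';
 * the slide loop then moves the live words above the squeezed frame
 * towards the stack bottom by 'displacement' words, and tso->sp is
 * adjusted by the total at the end.  One update then pays for both
 * thunks, since updating u1 reaches u2's consumers through the
 * indirection.
 */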
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS);
}
#endif /* DEBUG */
//@node Index, , Pausing a thread
//@subsection Index

//@index
3587 //* GarbageCollect:: @cindex\s-+GarbageCollect
3588 //* MarkRoot:: @cindex\s-+MarkRoot
3589 //* RevertCAFs:: @cindex\s-+RevertCAFs
3590 //* addBlock:: @cindex\s-+addBlock
3591 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3592 //* copy:: @cindex\s-+copy
3593 //* copyPart:: @cindex\s-+copyPart
3594 //* evacuate:: @cindex\s-+evacuate
3595 //* evacuate_large:: @cindex\s-+evacuate_large
3596 //* gcCAFs:: @cindex\s-+gcCAFs
3597 //* isAlive:: @cindex\s-+isAlive
3598 //* maybeLarge:: @cindex\s-+maybeLarge
3599 //* mkMutCons:: @cindex\s-+mkMutCons
3600 //* printMutOnceList:: @cindex\s-+printMutOnceList
3601 //* printMutableList:: @cindex\s-+printMutableList
3602 //* relocate_TSO:: @cindex\s-+relocate_TSO
3603 //* scavenge:: @cindex\s-+scavenge
3604 //* scavenge_large:: @cindex\s-+scavenge_large
3605 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3606 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3607 //* scavenge_one:: @cindex\s-+scavenge_one
3608 //* scavenge_srt:: @cindex\s-+scavenge_srt
3609 //* scavenge_stack:: @cindex\s-+scavenge_stack
3610 //* scavenge_static:: @cindex\s-+scavenge_static
3611 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3612 //* threadPaused:: @cindex\s-+threadPaused
3613 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3614 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3615 //* upd_evacuee:: @cindex\s-+upd_evacuee
3616 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list

//@end index