1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.101 2001/04/02 14:18:05 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
12 //* STATIC OBJECT LIST::
13 //* Static function declarations::
19 //* Sanity code for CAF garbage collection::
20 //* Lazy black holing::
22 //* Pausing a thread::
26 //@node Includes, STATIC OBJECT LIST
27 //@subsection Includes
33 #include "StoragePriv.h"
36 #include "SchedAPI.h" /* for ReverCAFs prototype */
39 #include "BlockAlloc.h"
45 #include "StablePriv.h"
47 #include "ParTicky.h" // ToDo: move into Rts.h
48 #if defined(GRAN) || defined(PAR)
49 # include "GranSimRts.h"
50 # include "ParallelRts.h"
54 # include "ParallelDebug.h"
59 #if defined(RTS_GTK_FRONTPANEL)
60 #include "FrontPanel.h"
63 //@node STATIC OBJECT LIST, Static function declarations, Includes
64 //@subsection STATIC OBJECT LIST
66 /* STATIC OBJECT LIST.
69 * We maintain a linked list of static objects that are still live.
70 * The requirements for this list are:
72 * - we need to scan the list while adding to it, in order to
73 * scavenge all the static objects (in the same way that
74 * breadth-first scavenging works for dynamic objects).
76 * - we need to be able to tell whether an object is already on
77 * the list, to break loops.
79 * Each static object has a "static link field", which we use for
80 * linking objects on to the list. We use a stack-type list, consing
81 * objects on the front as they are added (this means that the
82 * scavenge phase is depth-first, not breadth-first, but that
85 * A separate list is kept for objects that have been scavenged
86 * already - this is so that we can zero all the marks afterwards.
88 * An object is on the list if its static link field is non-zero; this
89 * means that we have to mark the end of the list with '1', not NULL.
91 * Extra notes for generational GC:
93 * Each generation has a static object list associated with it. When
94 * collecting generations up to N, we treat the static object lists
95 * from generations > N as roots.
97 * We build up a static object list while collecting generations 0..N,
98 * which is then appended to the static object list of generation N+1.
/* Global GC state shared by the collector's phases.
 *
 * NOTE(review): this is an elided listing -- the embedded original line
 * numbers are not contiguous, so declarations and the tails of several
 * comment blocks are missing between the lines below.
 */
100 StgClosure* static_objects; /* live static objects */
101 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
103 /* N is the oldest generation being collected, where the generations
104 * are numbered starting at 0. A major GC (indicated by the major_gc
105 * flag) is when we're collecting all generations. We only attempt to
106 * deal with static objects and GC CAFs when doing a major GC.
109 static rtsBool major_gc;
111 /* Youngest generation that objects should be evacuated to in
112 * evacuate(). (Logically an argument to evacuate, but it's static
113 * a lot of the time so we optimise it into a global variable).
119 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
120 static rtsBool weak_done; /* all done for this pass */
122 /* List of all threads during GC
124 static StgTSO *old_all_threads;
/* Threads found unreachable by traverse_weak_ptr_list(); they are later
 * handed to resurrectThreads(), which sends them an exception. */
125 static StgTSO *resurrected_threads;
127 /* Flag indicating failure to evacuate an object to the desired
130 static rtsBool failed_to_evac;
132 /* Old to-space (used for two-space collector only)
134 bdescr *old_to_space;
136 /* Data used for allocation area sizing.
138 lnat new_blocks; /* blocks allocated during this GC */
139 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
141 /* Used to avoid long recursion due to selector thunks
143 lnat thunk_selector_depth = 0;
144 #define MAX_THUNK_SELECTOR_DEPTH 256
146 //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
147 //@subsection Static function declarations
149 /* -----------------------------------------------------------------------------
150 Static function declarations
151 -------------------------------------------------------------------------- */
153 static StgClosure * evacuate ( StgClosure *q );
154 static void zero_static_object_list ( StgClosure* first_static );
155 static void zero_mutable_list ( StgMutClosure *first );
157 static rtsBool traverse_weak_ptr_list ( void );
158 static void cleanup_weak_ptr_list ( StgWeak **list );
160 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
161 static void scavenge_large ( step * );
162 static void scavenge ( step * );
163 static void scavenge_static ( void );
164 static void scavenge_mutable_list ( generation *g );
165 static void scavenge_mut_once_list ( generation *g );
168 static void gcCAFs ( void );
171 void revertCAFs ( void );
172 void scavengeCAFs ( void );
174 //@node Garbage Collect, Weak Pointers, Static function declarations
175 //@subsection Garbage Collect
177 /* -----------------------------------------------------------------------------
180 For garbage collecting generation N (and all younger generations):
182 - follow all pointers in the root set. the root set includes all
183 mutable objects in all steps in all generations.
185 - for each pointer, evacuate the object it points to into either
186 + to-space in the next higher step in that generation, if one exists,
187 + if the object's generation == N, then evacuate it to the next
188 generation if one exists, or else to-space in the current
190 + if the object's generation < N, then evacuate it to to-space
191 in the next generation.
193 - repeatedly scavenge to-space from each step in each generation
194 being collected until no more objects can be evacuated.
196 - free from-space in each step, and set from-space = to-space.
198 -------------------------------------------------------------------------- */
199 //@cindex GarbageCollect
/* GarbageCollect
 *
 * Top-level entry point for one collection cycle.
 *   get_roots       -- callback that marks/evacuates the client's root set
 *   force_major_gc  -- collect all generations regardless of occupancy
 *
 * Phases visible below: choose N (oldest generation to collect), set up
 * to-space for generations 0..N, scavenge the mutable lists of older
 * generations, follow the roots, scavenge to a fixpoint (including weak
 * pointers and threads), then tidy up, resize the nursery, run
 * finalizers/resurrections and report stats.
 *
 * NOTE(review): this is an elided listing -- original lines are missing
 * between many of the numbered lines below; do not assume adjacent
 * lines were adjacent in the original file.
 */
201 void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
205 lnat live, allocated, collected = 0, copied = 0;
209 CostCentreStack *prev_CCS;
212 #if defined(DEBUG) && defined(GRAN)
213 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
217 /* tell the stats department that we've started a GC */
220 /* Init stats and print par specific (timing) info */
221 PAR_TICKY_PAR_START();
223 /* attribute any costs to CCS_GC */
229 /* Approximate how much we allocated.
230 * Todo: only when generating stats?
232 allocated = calcAllocated();
234 /* Figure out which generation to collect
236 if (force_major_gc) {
237 N = RtsFlags.GcFlags.generations - 1;
/* otherwise: collect the youngest span of generations whose step 0 has
 * filled up (loop below raises N as needed; body elided). */
241 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
242 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
246 major_gc = (N == RtsFlags.GcFlags.generations-1);
249 #ifdef RTS_GTK_FRONTPANEL
250 if (RtsFlags.GcFlags.frontpanel) {
251 updateFrontPanelBeforeGC(N);
255 /* check stack sanity *before* GC (ToDo: check all threads) */
257 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
259 IF_DEBUG(sanity, checkFreeListSanity());
261 /* Initialise the static object lists
263 static_objects = END_OF_STATIC_LIST;
264 scavenged_static_objects = END_OF_STATIC_LIST;
266 /* zero the mutable list for the oldest generation (see comment by
267 * zero_mutable_list below).
270 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
273 /* Save the old to-space if we're doing a two-space collection
275 if (RtsFlags.GcFlags.generations == 1) {
276 old_to_space = g0s0->to_space;
277 g0s0->to_space = NULL;
280 /* Keep a count of how many new blocks we allocated during this GC
281 * (used for resizing the allocation area, later).
285 /* Initialise to-space in all the generations/steps that we're
288 for (g = 0; g <= N; g++) {
289 generations[g].mut_once_list = END_MUT_LIST;
290 generations[g].mut_list = END_MUT_LIST;
292 for (s = 0; s < generations[g].n_steps; s++) {
294 /* generation 0, step 0 doesn't need to-space */
295 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
299 /* Get a free block for to-space. Extra blocks will be chained on
303 stp = &generations[g].steps[s];
304 ASSERT(stp->gen->no == g);
305 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
/* NOTE(review): 'bd' is obtained from the block allocator on an elided
 * line above this point. */
306 bd->gen = &generations[g];
309 bd->evacuated = 1; /* it's a to-space block */
311 stp->hpLim = stp->hp + BLOCK_SIZE_W;
315 stp->scan = bd->start;
317 stp->new_large_objects = NULL;
318 stp->scavenged_large_objects = NULL;
320 /* mark the large objects as not evacuated yet */
321 for (bd = stp->large_objects; bd; bd = bd->link) {
327 /* make sure the older generations have at least one block to
328 * allocate into (this makes things easier for copy(), see below.
330 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
331 for (s = 0; s < generations[g].n_steps; s++) {
332 stp = &generations[g].steps[s];
333 if (stp->hp_bd == NULL) {
335 bd->gen = &generations[g];
338 bd->evacuated = 0; /* *not* a to-space block */
340 stp->hpLim = stp->hp + BLOCK_SIZE_W;
346 /* Set the scan pointer for older generations: remember we
347 * still have to scavenge objects that have been promoted. */
349 stp->scan_bd = stp->hp_bd;
350 stp->to_space = NULL;
352 stp->new_large_objects = NULL;
353 stp->scavenged_large_objects = NULL;
357 /* -----------------------------------------------------------------------
358 * follow all the roots that we know about:
359 * - mutable lists from each generation > N
360 * we want to *scavenge* these roots, not evacuate them: they're not
361 * going to move in this GC.
362 * Also: do them in reverse generation order. This is because we
363 * often want to promote objects that are pointed to by older
364 * generations early, so we don't have to repeatedly copy them.
365 * Doing the generations in reverse order ensures that we don't end
366 * up in the situation where we want to evac an object to gen 3 and
367 * it has already been evaced to gen 2.
371 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
372 generations[g].saved_mut_list = generations[g].mut_list;
373 generations[g].mut_list = END_MUT_LIST;
376 /* Do the mut-once lists first */
377 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
378 IF_PAR_DEBUG(verbose,
379 printMutOnceList(&generations[g]));
380 scavenge_mut_once_list(&generations[g]);
382 for (st = generations[g].n_steps-1; st >= 0; st--) {
383 scavenge(&generations[g].steps[st]);
387 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
388 IF_PAR_DEBUG(verbose,
389 printMutableList(&generations[g]));
390 scavenge_mutable_list(&generations[g]);
392 for (st = generations[g].n_steps-1; st >= 0; st--) {
393 scavenge(&generations[g].steps[st]);
400 /* follow all the roots that the application knows about.
406 /* And don't forget to mark the TSO if we got here direct from
408 /* Not needed in a seq version?
410 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
414 /* Mark the entries in the GALA table of the parallel system */
415 markLocalGAs(major_gc);
416 /* Mark all entries on the list of pending fetches */
417 markPendingFetches(major_gc);
420 /* Mark the weak pointer list, and prepare to detect dead weak
423 old_weak_ptr_list = weak_ptr_list;
424 weak_ptr_list = NULL;
425 weak_done = rtsFalse;
427 /* The all_threads list is like the weak_ptr_list.
428 * See traverse_weak_ptr_list() for the details.
430 old_all_threads = all_threads;
431 all_threads = END_TSO_QUEUE;
432 resurrected_threads = END_TSO_QUEUE;
434 /* Mark the stable pointer table.
436 markStablePtrTable(major_gc);
440 /* ToDo: To fix the caf leak, we need to make the commented out
441 * parts of this code do something sensible - as described in
444 extern void markHugsObjects(void);
449 /* -------------------------------------------------------------------------
450 * Repeatedly scavenge all the areas we know about until there's no
451 * more scavenging to be done.
458 /* scavenge static objects */
459 if (major_gc && static_objects != END_OF_STATIC_LIST) {
461 checkStaticObjects());
465 /* When scavenging the older generations: Objects may have been
466 * evacuated from generations <= N into older generations, and we
467 * need to scavenge these objects. We're going to try to ensure that
468 * any evacuations that occur move the objects into at least the
469 * same generation as the object being scavenged, otherwise we
470 * have to create new entries on the mutable list for the older
474 /* scavenge each step in generations 0..maxgen */
478 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
479 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
480 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
483 stp = &generations[gen].steps[st];
485 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
490 if (stp->new_large_objects != NULL) {
/* NOTE(review): 'flag' and the 'loop' label are declared/defined on
 * elided lines; this is the fixpoint test of the scavenge loop. */
498 if (flag) { goto loop; }
500 /* must be last... */
501 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
506 /* Final traversal of the weak pointer list (see comment by
507 * cleanUpWeakPtrList below).
509 cleanup_weak_ptr_list(&weak_ptr_list);
511 /* Now see which stable names are still alive.
513 gcStablePtrTable(major_gc);
516 /* Reconstruct the Global Address tables used in GUM */
517 rebuildGAtables(major_gc);
518 IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
519 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
522 /* Set the maximum blocks for the oldest generation, based on twice
523 * the amount of live data now, adjusted to fit the maximum heap
526 * This is an approximation, since in the worst case we'll need
527 * twice the amount of live data plus whatever space the other
530 if (RtsFlags.GcFlags.generations > 1) {
532 oldest_gen->max_blocks =
533 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
534 RtsFlags.GcFlags.minOldGenSize);
535 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
536 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
537 if (((int)oldest_gen->max_blocks -
538 (int)oldest_gen->steps[0].to_blocks) <
539 (RtsFlags.GcFlags.pcFreeHeap *
540 RtsFlags.GcFlags.maxHeapSize / 200)) {
547 /* run through all the generations/steps and tidy up
/* stats: start from the total new blocks allocated; unused slack at the
 * end of each to-space chain is subtracted per-step below. */
549 copied = new_blocks * BLOCK_SIZE_W;
550 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
553 generations[g].collections++; /* for stats */
556 for (s = 0; s < generations[g].n_steps; s++) {
558 stp = &generations[g].steps[s];
560 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
561 /* Tidy the end of the to-space chains */
562 stp->hp_bd->free = stp->hp;
563 stp->hp_bd->link = NULL;
564 /* stats information: how much we copied */
566 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
571 /* for generations we collected... */
574 collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */
576 /* free old memory and shift to-space into from-space for all
577 * the collected steps (except the allocation area). These
578 * freed blocks will probaby be quickly recycled.
580 if (!(g == 0 && s == 0)) {
581 freeChain(stp->blocks);
582 stp->blocks = stp->to_space;
583 stp->n_blocks = stp->to_blocks;
584 stp->to_space = NULL;
586 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
587 bd->evacuated = 0; /* now from-space */
591 /* LARGE OBJECTS. The current live large objects are chained on
592 * scavenged_large, having been moved during garbage
593 * collection from large_objects. Any objects left on
594 * large_objects list are therefore dead, so we free them here.
596 for (bd = stp->large_objects; bd != NULL; bd = next) {
601 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
604 stp->large_objects = stp->scavenged_large_objects;
606 /* Set the maximum blocks for this generation, interpolating
607 * between the maximum size of the oldest and youngest
610 * max_blocks = oldgen_max_blocks * G
611 * ----------------------
616 generations[g].max_blocks = (oldest_gen->max_blocks * g)
617 / (RtsFlags.GcFlags.generations-1);
619 generations[g].max_blocks = oldest_gen->max_blocks;
622 /* for older generations... */
625 /* For older generations, we need to append the
626 * scavenged_large_object list (i.e. large objects that have been
627 * promoted during this GC) to the large_object list for that step.
629 for (bd = stp->scavenged_large_objects; bd; bd = next) {
632 dbl_link_onto(bd, &stp->large_objects);
635 /* add the new blocks we promoted during this GC */
636 stp->n_blocks += stp->to_blocks;
641 /* Guess the amount of live data for stats. */
644 /* Free the small objects allocated via allocate(), since this will
645 * all have been copied into G0S1 now.
647 if (small_alloc_list != NULL) {
648 freeChain(small_alloc_list);
650 small_alloc_list = NULL;
654 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
656 /* Two-space collector:
657 * Free the old to-space, and estimate the amount of live data.
659 if (RtsFlags.GcFlags.generations == 1) {
662 if (old_to_space != NULL) {
663 freeChain(old_to_space);
665 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
666 bd->evacuated = 0; /* now from-space */
669 /* For a two-space collector, we need to resize the nursery. */
671 /* set up a new nursery. Allocate a nursery size based on a
672 * function of the amount of live data (currently a factor of 2,
673 * should be configurable (ToDo)). Use the blocks from the old
674 * nursery if possible, freeing up any left over blocks.
676 * If we get near the maximum heap size, then adjust our nursery
677 * size accordingly. If the nursery is the same size as the live
678 * data (L), then we need 3L bytes. We can reduce the size of the
679 * nursery to bring the required memory down near 2L bytes.
681 * A normal 2-space collector would need 4L bytes to give the same
682 * performance we get from 3L bytes, reducing to the same
683 * performance at 2L bytes.
685 blocks = g0s0->to_blocks;
687 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
688 RtsFlags.GcFlags.maxHeapSize ) {
689 int adjusted_blocks; /* signed on purpose */
692 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
693 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
694 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
695 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
698 blocks = adjusted_blocks;
701 blocks *= RtsFlags.GcFlags.oldGenFactor;
702 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
703 blocks = RtsFlags.GcFlags.minAllocAreaSize;
706 resizeNursery(blocks);
709 /* Generational collector:
710 * If the user has given us a suggested heap size, adjust our
711 * allocation area to make best use of the memory available.
714 if (RtsFlags.GcFlags.heapSizeSuggestion) {
716 nat needed = calcNeeded(); /* approx blocks needed at next GC */
718 /* Guess how much will be live in generation 0 step 0 next time.
719 * A good approximation is the obtained by finding the
720 * percentage of g0s0 that was live at the last minor GC.
723 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
726 /* Estimate a size for the allocation area based on the
727 * information available. We might end up going slightly under
728 * or over the suggested heap size, but we should be pretty
731 * Formula: suggested - needed
732 * ----------------------------
733 * 1 + g0s0_pcnt_kept/100
735 * where 'needed' is the amount of memory needed at the next
736 * collection for collecting all steps except g0s0.
739 (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
740 (100 + (int)g0s0_pcnt_kept);
742 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
743 blocks = RtsFlags.GcFlags.minAllocAreaSize;
746 resizeNursery((nat)blocks);
750 /* mark the garbage collected CAFs as dead */
751 #if 0 /* doesn't work at the moment */
753 if (major_gc) { gcCAFs(); }
757 /* zero the scavenged static object list */
759 zero_static_object_list(scavenged_static_objects);
766 /* start any pending finalizers */
767 scheduleFinalizers(old_weak_ptr_list);
769 /* send exceptions to any threads which were about to die */
770 resurrectThreads(resurrected_threads);
772 /* check sanity after GC */
773 IF_DEBUG(sanity, checkSanity(N));
775 /* extra GC trace info */
776 IF_DEBUG(gc, stat_describe_gens());
779 /* symbol-table based profiling */
780 /* heapCensus(to_space); */ /* ToDo */
783 /* restore enclosing cost centre */
789 /* check for memory leaks if sanity checking is on */
790 IF_DEBUG(sanity, memInventory());
792 #ifdef RTS_GTK_FRONTPANEL
793 if (RtsFlags.GcFlags.frontpanel) {
794 updateFrontPanelAfterGC( N, live );
798 /* ok, GC over: tell the stats department what happened. */
799 stat_endGC(allocated, collected, live, copied, N);
804 //@node Weak Pointers, Evacuation, Garbage Collect
805 //@subsection Weak Pointers
807 /* -----------------------------------------------------------------------------
810 traverse_weak_ptr_list is called possibly many times during garbage
811 collection. It returns a flag indicating whether it did any work
812 (i.e. called evacuate on any live pointers).
814 Invariant: traverse_weak_ptr_list is called when the heap is in an
815 idempotent state. That means that there are no pending
816 evacuate/scavenge operations. This invariant helps the weak
817 pointer code decide which weak pointers are dead - if there are no
818 new live weak pointers, then all the currently unreachable ones are
821 For generational GC: we just don't try to finalize weak pointers in
822 older generations than the one we're collecting. This could
823 probably be optimised by keeping per-generation lists of weak
824 pointers, but for a few weak pointers this scheme will work.
825 -------------------------------------------------------------------------- */
826 //@cindex traverse_weak_ptr_list
/* traverse_weak_ptr_list
 *
 * One pass over old_weak_ptr_list (and old_all_threads): weak pointers
 * whose keys are found alive have their value and finalizer evacuated
 * and are moved back onto weak_ptr_list; threads found alive move back
 * onto all_threads.  Returns rtsTrue iff anything was evacuated, in
 * which case the caller must scavenge again and re-call this function
 * (see the fixpoint loop in GarbageCollect).
 *
 * NOTE(review): the return type line ('static rtsBool', per the
 * forward declaration above) is elided from this listing, as are
 * several statements between the numbered lines below.
 */
829 traverse_weak_ptr_list(void)
831 StgWeak *w, **last_w, *next_w;
833 rtsBool flag = rtsFalse;
835 if (weak_done) { return rtsFalse; }
837 /* doesn't matter where we evacuate values/finalizers to, since
838 * these pointers are treated as roots (iff the keys are alive).
842 last_w = &old_weak_ptr_list;
843 for (w = old_weak_ptr_list; w; w = next_w) {
845 /* First, this weak pointer might have been evacuated. If so,
846 * remove the forwarding pointer from the weak_ptr_list.
848 if (get_itbl(w)->type == EVACUATED) {
849 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
853 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
854 * called on a live weak pointer object. Just remove it.
856 if (w->header.info == &stg_DEAD_WEAK_info) {
857 next_w = ((StgDeadWeak *)w)->link;
862 ASSERT(get_itbl(w)->type == WEAK);
864 /* Now, check whether the key is reachable.
866 if ((new = isAlive(w->key))) {
868 /* evacuate the value and finalizer */
869 w->value = evacuate(w->value);
870 w->finalizer = evacuate(w->finalizer);
871 /* remove this weak ptr from the old_weak_ptr list */
873 /* and put it on the new weak ptr list */
875 w->link = weak_ptr_list;
878 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
888 /* Now deal with the all_threads list, which behaves somewhat like
889 * the weak ptr list. If we discover any threads that are about to
890 * become garbage, we wake them up and administer an exception.
893 StgTSO *t, *tmp, *next, **prev;
895 prev = &old_all_threads;
896 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
898 /* Threads which have finished or died get dropped from
901 switch (t->what_next) {
902 case ThreadRelocated:
908 next = t->global_link;
914 /* Threads which have already been determined to be alive are
915 * moved onto the all_threads list.
/* cast-as-lvalue below is an old gcc extension used throughout this file */
917 (StgClosure *)tmp = isAlive((StgClosure *)t);
919 next = tmp->global_link;
920 tmp->global_link = all_threads;
924 prev = &(t->global_link);
925 next = t->global_link;
930 /* If we didn't make any changes, then we can go round and kill all
931 * the dead weak pointers. The old_weak_ptr list is used as a list
932 * of pending finalizers later on.
934 if (flag == rtsFalse) {
935 cleanup_weak_ptr_list(&old_weak_ptr_list);
936 for (w = old_weak_ptr_list; w; w = w->link) {
937 w->finalizer = evacuate(w->finalizer);
940 /* And resurrect any threads which were about to become garbage.
943 StgTSO *t, *tmp, *next;
944 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
945 next = t->global_link;
946 (StgClosure *)tmp = evacuate((StgClosure *)t);
947 tmp->global_link = resurrected_threads;
948 resurrected_threads = tmp;
958 /* -----------------------------------------------------------------------------
959 After GC, the live weak pointer list may have forwarding pointers
960 on it, because a weak pointer object was evacuated after being
961 moved to the live weak pointer list. We remove those forwarding
964 Also, we don't consider weak pointer objects to be reachable, but
965 we must nevertheless consider them to be "live" and retain them.
966 Therefore any weak pointer objects which haven't as yet been
967 evacuated need to be evacuated now.
968 -------------------------------------------------------------------------- */
970 //@cindex cleanup_weak_ptr_list
/* cleanup_weak_ptr_list
 *
 * Post-GC tidy of a weak-pointer list: snap out forwarding pointers
 * left by evacuation, and evacuate any weak objects on the list that
 * have not themselves been evacuated yet (weak objects are retained
 * even though they are not "reachable" -- see the comment block above).
 * NOTE(review): elided listing; the return type ('static void', per the
 * forward declaration) and list-relinking statements are missing here.
 */
973 cleanup_weak_ptr_list ( StgWeak **list )
975 StgWeak *w, **last_w;
978 for (w = *list; w; w = w->link) {
980 if (get_itbl(w)->type == EVACUATED) {
981 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
985 if (Bdescr((P_)w)->evacuated == 0) {
986 (StgClosure *)w = evacuate((StgClosure *)w);
993 /* -----------------------------------------------------------------------------
994 isAlive determines whether the given closure is still alive (after
995 a garbage collection) or not. It returns the new address of the
996 closure if it is alive, or NULL otherwise.
997 -------------------------------------------------------------------------- */
/* isAlive
 *
 * Determines whether closure p survived the collection: returns its
 * new address if alive, NULL otherwise (see the comment block above).
 * Follows indirections, forwarding pointers and relocated TSOs.
 * NOTE(review): elided listing -- the return type line, the
 * 'info = get_itbl(p)' fetch, the 'size' declaration and several
 * case labels/returns are missing between the lines below.
 */
1002 isAlive(StgClosure *p)
1004 const StgInfoTable *info;
1011 /* ToDo: for static closures, check the static link field.
1012 * Problem here is that we sometimes don't set the link field, eg.
1013 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1016 /* ignore closures in generations that we're not collecting. */
1017 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
1021 switch (info->type) {
1026 case IND_OLDGEN: /* rely on compatible layout with StgInd */
1027 case IND_OLDGEN_PERM:
1028 /* follow indirections */
1029 p = ((StgInd *)p)->indirectee;
1034 return ((StgEvacuated *)p)->evacuee;
1037 size = arr_words_sizeW((StgArrWords *)p);
1041 case MUT_ARR_PTRS_FROZEN:
1042 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
1046 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1047 p = (StgClosure *)((StgTSO *)p)->link;
1051 size = tso_sizeW((StgTSO *)p);
/* large objects are judged alive by their block's evacuated flag */
1053 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
1054 && Bdescr((P_)p)->evacuated)
/* MarkRoot
 *
 * Evacuates a root closure and returns its new location; the disabled
 * PAR/DEBUG variant additionally asserts the result is in to-space.
 * NOTE(review): the return type line (presumably 'StgClosure *') is
 * elided from this listing.
 */
1068 MarkRoot(StgClosure *root)
1070 # if 0 && defined(PAR) && defined(DEBUG)
1071 StgClosure *foo = evacuate(root);
1072 // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
1073 ASSERT(isAlive(foo)); // must be in to-space
1076 return evacuate(root);
/* addBlock
 *
 * Chains a freshly allocated block onto the end of stp's to-space and
 * resets hp/hpLim to allocate into it.
 * NOTE(review): elided listing -- the lines initialising bd's fields
 * and the presumed new_blocks accounting are missing; the gen->no <= N
 * test likely distinguishes to-space vs. older-generation blocks
 * (compare the to-space setup in GarbageCollect) -- confirm against
 * the full source.
 */
1081 static void addBlock(step *stp)
1083 bdescr *bd = allocBlock();
1087 if (stp->gen->no <= N) {
1093 stp->hp_bd->free = stp->hp;
1094 stp->hp_bd->link = bd;
1095 stp->hp = bd->start;
1096 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1102 //@cindex upd_evacuee
/* upd_evacuee
 *
 * Overwrites closure p with a forwarding pointer: its info table
 * becomes stg_EVACUATED_info and its evacuee field points at dest,
 * the object's new to-space address.
 */
1104 static __inline__ void
1105 upd_evacuee(StgClosure *p, StgClosure *dest)
1107 p->header.info = &stg_EVACUATED_info;
1108 ((StgEvacuated *)p)->evacuee = dest;
/* copy
 *
 * Copies 'size' words of closure src into to-space of step stp
 * (promoted to generations[evac_gen].steps[0] if stp is too young and
 * eager promotion is enabled), leaves a forwarding pointer in src via
 * upd_evacuee(), and returns the new address.
 * NOTE(review): elided listing -- the declarations of 'to'/'dest'/'from',
 * the loop body doing the word copy, and the hp adjustment are missing
 * between the lines below.
 */
1113 static __inline__ StgClosure *
1114 copy(StgClosure *src, nat size, step *stp)
1118 TICK_GC_WORDS_COPIED(size);
1119 /* Find out where we're going, using the handy "to" pointer in
1120 * the step of the source object. If it turns out we need to
1121 * evacuate to an older generation, adjust it here (see comment
1124 if (stp->gen->no < evac_gen) {
1125 #ifdef NO_EAGER_PROMOTION
1126 failed_to_evac = rtsTrue;
1128 stp = &generations[evac_gen].steps[0];
1132 /* chain a new block onto the to-space for the destination step if
1135 if (stp->hp + size >= stp->hpLim) {
1139 for(to = stp->hp, from = (P_)src; size>0; --size) {
1145 upd_evacuee(src,(StgClosure *)dest);
1146 return (StgClosure *)dest;
1149 /* Special version of copy() for when we only want to copy the info
1150 * pointer of an object, but reserve some padding after it. This is
1151 * used to optimise evacuation of BLACKHOLEs.
/* copyPart
 *
 * Like copy(), but copies only size_to_copy words while reserving
 * size_to_reserve words in to-space -- used to optimise evacuation of
 * BLACKHOLEs (see the comment block above): only the header needs
 * copying, but the full closure size must be reserved.
 * NOTE(review): elided listing -- local declarations and the copy-loop
 * body are missing between the lines below.
 */
1156 static __inline__ StgClosure *
1157 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1161 TICK_GC_WORDS_COPIED(size_to_copy);
1162 if (stp->gen->no < evac_gen) {
1163 #ifdef NO_EAGER_PROMOTION
1164 failed_to_evac = rtsTrue;
1166 stp = &generations[evac_gen].steps[0];
1170 if (stp->hp + size_to_reserve >= stp->hpLim) {
1174 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
/* hp advances by the reserved size, not merely the copied size */
1179 stp->hp += size_to_reserve;
1180 upd_evacuee(src,(StgClosure *)dest);
1181 return (StgClosure *)dest;
1184 //@node Evacuation, Scavenging, Weak Pointers
1185 //@subsection Evacuation
1187 /* -----------------------------------------------------------------------------
1188 Evacuate a large object
1190 This just consists of removing the object from the (doubly-linked)
1191 large_alloc_list, and linking it on to the (singly-linked)
1192 new_large_objects list, from where it will be scavenged later.
1194 Convention: bd->evacuated is /= 0 for a large object that has been
1195 evacuated, or 0 otherwise.
1196 -------------------------------------------------------------------------- */
1198 //@cindex evacuate_large
/* evacuate_large
 *
 * Evacuates a large object: unlinks its block descriptor from the
 * source step's (doubly-linked) large_objects list and pushes it onto
 * the destination step's (singly-linked) new_large_objects list for
 * later scavenging; bd->evacuated non-zero marks it done (see the
 * comment block above).  'mutable' causes the object to be recorded
 * on a mutable list.
 * NOTE(review): elided listing -- the return type, the assignment of
 * 'stp' from the object's source step, and the bd->step/evacuated
 * updates are missing between the lines below.
 */
1201 evacuate_large(StgPtr p, rtsBool mutable)
1203 bdescr *bd = Bdescr(p);
1206 /* should point to the beginning of the block */
1207 ASSERT(((W_)p & BLOCK_MASK) == 0);
1209 /* already evacuated? */
1210 if (bd->evacuated) {
1211 /* Don't forget to set the failed_to_evac flag if we didn't get
1212 * the desired destination (see comments in evacuate()).
1214 if (bd->gen->no < evac_gen) {
1215 failed_to_evac = rtsTrue;
1216 TICK_GC_FAILED_PROMOTION();
1222 /* remove from large_object list */
1224 bd->back->link = bd->link;
1225 } else { /* first object in the list */
1226 stp->large_objects = bd->link;
1229 bd->link->back = bd->back;
1232 /* link it on to the evacuated large object list of the destination step
1235 if (stp->gen->no < evac_gen) {
1236 #ifdef NO_EAGER_PROMOTION
1237 failed_to_evac = rtsTrue;
1239 stp = &generations[evac_gen].steps[0];
1245 bd->link = stp->new_large_objects;
1246 stp->new_large_objects = bd;
1250 recordMutable((StgMutClosure *)p);
1254 /* -----------------------------------------------------------------------------
1255 Adding a MUT_CONS to an older generation.
1257 This is necessary from time to time when we end up with an
1258 old-to-new generation pointer in a non-mutable object. We defer
1259 the promotion until the next GC.
1260 -------------------------------------------------------------------------- */
/* mkMutCons
 *
 * Allocates a MUT_CONS wrapper for 'ptr' in step 0 of generation
 * 'gen' and records it on that generation's old-to-new mutable list --
 * used to defer promotion of an old-to-new pointer found in a
 * non-mutable object (see the comment block above).  Returns the new
 * closure.
 * NOTE(review): elided listing -- the return type, local declarations,
 * and the line storing 'ptr' into the MUT_CONS payload are missing
 * between the lines below.
 */
1265 mkMutCons(StgClosure *ptr, generation *gen)
1270 stp = &gen->steps[0];
1272 /* chain a new block onto the to-space for the destination step if
1275 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1279 q = (StgMutVar *)stp->hp;
1280 stp->hp += sizeofW(StgMutVar);
1282 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1284 recordOldToNewPtrs((StgMutClosure *)q);
1286 return (StgClosure *)q;
1289 /* -----------------------------------------------------------------------------
1292 This is called (eventually) for every live object in the system.
1294 The caller to evacuate specifies a desired generation in the
1295 evac_gen global variable. The following conditions apply to
1296 evacuating an object which resides in generation M when we're
1297 collecting up to generation N
1301 else evac to step->to
1303 if M < evac_gen evac to evac_gen, step 0
1305 if the object is already evacuated, then we check which generation
1308 if M >= evac_gen do nothing
1309 if M < evac_gen set failed_to_evac flag to indicate that we
1310 didn't manage to evacuate this object into evac_gen.
1312 -------------------------------------------------------------------------- */
/* evacuate: copy the live closure `q` into to-space and return its new
 * address.  The global `evac_gen` names the generation the caller would
 * like the object to end up in; if the object cannot be promoted that
 * far, the global `failed_to_evac` flag is set instead so the caller
 * can record an old-to-new pointer.  Dispatches on the closure type.
 * NOTE(review): this excerpt is line-sampled -- many interior lines
 * (case labels, braces, the bd/stp setup) of the original function are
 * not visible here, so the control flow below is incomplete.
 */
1316 evacuate(StgClosure *q)
1321 const StgInfoTable *info;
1324 if (HEAP_ALLOCED(q)) {
1326 if (bd->gen->no > N) {
1327 /* Can't evacuate this object, because it's in a generation
1328 * older than the ones we're collecting. Let's hope that it's
1329 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1331 if (bd->gen->no < evac_gen) {
1333 failed_to_evac = rtsTrue;
1334 TICK_GC_FAILED_PROMOTION();
1341 else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1344 /* make sure the info pointer is into text space */
1345 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1346 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
/* GranSim/GUM: a revertible black hole masquerades as its original closure */
1349 if (info->type==RBH) {
1350 info = REVERT_INFOPTR(info);
1352 belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
1353 q, info_type(q), info, info_type_by_ip(info)));
1357 switch (info -> type) {
1360 ASSERT(q->header.info != &stg_MUT_CONS_info);
1362 to = copy(q,sizeW_fromITBL(info),stp);
1363 recordMutable((StgMutClosure *)to);
/* small Char#/Int# constructors are interned: return the shared static
 * CHARLIKE/INTLIKE closure instead of copying */
1368 StgWord w = (StgWord)q->payload[0];
1369 if (q->header.info == Czh_con_info &&
1370 /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */
1371 (StgChar)w <= MAX_CHARLIKE) {
1372 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1374 if (q->header.info == Izh_con_info &&
1375 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1376 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1378 /* else, fall through ... */
1384 return copy(q,sizeofW(StgHeader)+1,stp);
1386 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1391 #ifdef NO_PROMOTE_THUNKS
1392 if (bd->gen->no == 0 &&
1393 bd->step->no != 0 &&
1394 bd->step->no == bd->gen->n_steps-1) {
1398 return copy(q,sizeofW(StgHeader)+2,stp);
1406 return copy(q,sizeofW(StgHeader)+2,stp);
1412 case IND_OLDGEN_PERM:
1417 return copy(q,sizeW_fromITBL(info),stp);
1420 case SE_CAF_BLACKHOLE:
/* black holes: copy the header only; the rest of the space is slop */
1423 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1426 to = copy(q,BLACKHOLE_sizeW(),stp);
1427 recordMutable((StgMutClosure *)to);
1430 case THUNK_SELECTOR:
/* THUNK_SELECTOR: if the selectee is already an evaluated constructor,
 * perform the field selection here instead of copying the thunk
 * (the "selector optimisation", avoids space leaks via selector chains).
 */
1432 const StgInfoTable* selectee_info;
1433 StgClosure* selectee = ((StgSelector*)q)->selectee;
1436 selectee_info = get_itbl(selectee);
1437 switch (selectee_info->type) {
1446 StgWord32 offset = info->layout.selector_offset;
1448 /* check that the size is in range */
1450 (StgWord32)(selectee_info->layout.payload.ptrs +
1451 selectee_info->layout.payload.nptrs));
1453 /* perform the selection! */
1454 q = selectee->payload[offset];
1456 /* if we're already in to-space, there's no need to continue
1457 * with the evacuation, just update the source address with
1458 * a pointer to the (evacuated) constructor field.
1460 if (HEAP_ALLOCED(q)) {
1461 bdescr *bd = Bdescr((P_)q);
1462 if (bd->evacuated) {
1463 if (bd->gen->no < evac_gen) {
1464 failed_to_evac = rtsTrue;
1465 TICK_GC_FAILED_PROMOTION();
1471 /* otherwise, carry on and evacuate this constructor field,
1472 * (but not the constructor itself)
1481 case IND_OLDGEN_PERM:
/* look through indirections to find the real selectee */
1482 selectee = ((StgInd *)selectee)->indirectee;
1486 selectee = ((StgEvacuated *)selectee)->evacuee;
1489 case THUNK_SELECTOR:
1490 /* we can't recurse indefinitely in evacuate(), so set a
1491 * limit on the number of times we can go around this
1494 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1496 bd = Bdescr((P_)selectee);
1497 if (!bd->evacuated) {
1498 thunk_selector_depth++;
1499 selectee = evacuate(selectee);
1500 thunk_selector_depth--;
1504 /* otherwise, fall through... */
1515 case SE_CAF_BLACKHOLE:
1519 /* not evaluated yet */
1523 /* a copy of the top-level cases below */
1524 case RBH: // cf. BLACKHOLE_BQ
1526 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1527 to = copy(q,BLACKHOLE_sizeW(),stp);
1528 //ToDo: derive size etc from reverted IP
1529 //to = copy(q,size,stp);
1530 // recordMutable((StgMutClosure *)to);
1535 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1536 to = copy(q,sizeofW(StgBlockedFetch),stp);
1543 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1544 to = copy(q,sizeofW(StgFetchMe),stp);
1548 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1549 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1554 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1555 (int)(selectee_info->type));
/* selection not possible: evacuate the selector thunk itself */
1558 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1562 /* follow chains of indirections, don't evacuate them */
1563 q = ((StgInd*)q)->indirectee;
/* static objects: thread onto the static_objects list (via the static
 * link field) the first time we see them during a major GC */
1567 if (info->srt_len > 0 && major_gc &&
1568 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1569 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1570 static_objects = (StgClosure *)q;
1575 if (info->srt_len > 0 && major_gc &&
1576 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1577 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1578 static_objects = (StgClosure *)q;
1583 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1584 * on the CAF list, so don't do anything with it here (we'll
1585 * scavenge it later).
1588 && ((StgIndStatic *)q)->saved_info == NULL
1589 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1590 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1591 static_objects = (StgClosure *)q;
1596 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1597 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1598 static_objects = (StgClosure *)q;
1602 case CONSTR_INTLIKE:
1603 case CONSTR_CHARLIKE:
1604 case CONSTR_NOCAF_STATIC:
1605 /* no need to put these on the static linked list, they don't need
1620 /* shouldn't see these */
1621 barf("evacuate: stack frame at %p\n", q);
1625 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1626 * of stack, tagging and all.
1628 * They can be larger than a block in size. Both are only
1629 * allocated via allocate(), so they should be chained on to the
1630 * large_object list.
1633 nat size = pap_sizeW((StgPAP*)q);
1634 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1635 evacuate_large((P_)q, rtsFalse);
1638 return copy(q,size,stp);
1643 /* Already evacuated, just return the forwarding address.
1644 * HOWEVER: if the requested destination generation (evac_gen) is
1645 * older than the actual generation (because the object was
1646 * already evacuated to a younger generation) then we have to
1647 * set the failed_to_evac flag to indicate that we couldn't
1648 * manage to promote the object to the desired generation.
1650 if (evac_gen > 0) { /* optimisation */
1651 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1652 if (Bdescr((P_)p)->gen->no < evac_gen) {
1653 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1654 failed_to_evac = rtsTrue;
1655 TICK_GC_FAILED_PROMOTION();
1658 return ((StgEvacuated*)q)->evacuee;
1662 nat size = arr_words_sizeW((StgArrWords *)q);
1664 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1665 evacuate_large((P_)q, rtsFalse);
1668 /* just copy the block */
1669 return copy(q,size,stp);
1674 case MUT_ARR_PTRS_FROZEN:
1676 nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
1678 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1679 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1682 /* just copy the block */
1683 to = copy(q,size,stp);
1684 if (info->type == MUT_ARR_PTRS) {
1685 recordMutable((StgMutClosure *)to);
1693 StgTSO *tso = (StgTSO *)q;
1694 nat size = tso_sizeW(tso);
1697 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1699 if (tso->what_next == ThreadRelocated) {
1700 q = (StgClosure *)tso->link;
1704 /* Large TSOs don't get moved, so no relocation is required.
1706 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1707 evacuate_large((P_)q, rtsTrue);
1710 /* To evacuate a small TSO, we need to relocate the update frame
1714 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1716 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1718 /* relocate the stack pointers... */
1719 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1720 new_tso->sp = (StgPtr)new_tso->sp + diff;
1722 relocate_TSO(tso, new_tso);
1724 recordMutable((StgMutClosure *)new_tso);
1725 return (StgClosure *)new_tso;
/* GranSim/GUM-only closure types below (RBH, BLOCKED_FETCH, FETCH_ME*) */
1730 case RBH: // cf. BLACKHOLE_BQ
1732 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1733 to = copy(q,BLACKHOLE_sizeW(),stp);
1734 //ToDo: derive size etc from reverted IP
1735 //to = copy(q,size,stp);
1736 recordMutable((StgMutClosure *)to);
1738 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1739 q, info_type(q), to, info_type(to)));
1744 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1745 to = copy(q,sizeofW(StgBlockedFetch),stp);
1747 belch("@@ evacuate: %p (%s) to %p (%s)",
1748 q, info_type(q), to, info_type(to)));
1755 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1756 to = copy(q,sizeofW(StgFetchMe),stp);
1758 belch("@@ evacuate: %p (%s) to %p (%s)",
1759 q, info_type(q), to, info_type(to)));
1763 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1764 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1766 belch("@@ evacuate: %p (%s) to %p (%s)",
1767 q, info_type(q), to, info_type(to)));
1772 barf("evacuate: strange closure type %d", (int)(info->type));
1778 /* -----------------------------------------------------------------------------
1779 relocate_TSO is called just after a TSO has been copied from src to
1780 dest. It adjusts the update frame list for the new location.
1781 -------------------------------------------------------------------------- */
1782 //@cindex relocate_TSO
/* relocate_TSO: called just after a TSO has been copied from `src` to
 * `dest`; walks the update-frame chain on the new stack and adds the
 * word-offset `diff` to every frame link so the chain points into the
 * copied stack rather than the old one.  CATCH and SEQ frames carry
 * their link at a different offset, hence the separate cases.
 * NOTE(review): line-sampled excerpt -- the su initialisation, the
 * frame-advance statements and the closing brace are not visible.
 */
1785 relocate_TSO(StgTSO *src, StgTSO *dest)
1792 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
/* walk frames until we run off the top of the new stack */
1796 while ((P_)su < dest->stack + dest->stack_size) {
1797 switch (get_itbl(su)->type) {
1799 /* GCC actually manages to common up these three cases! */
1802 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1807 cf = (StgCatchFrame *)su;
1808 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1813 sf = (StgSeqFrame *)su;
1814 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1823 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1831 //@node Scavenging, Reverting CAFs, Evacuation
1832 //@subsection Scavenging
1834 //@cindex scavenge_srt
/* scavenge_srt: evacuate every closure referenced from the static
 * reference table (SRT) attached to `info`.  On win32 builds, SRT
 * entries with bit 0 set are indirect references through a DLL import
 * table and need one extra dereference after masking the tag bit.
 * NOTE(review): line-sampled excerpt -- the non-DLL evacuate branch
 * and closing braces are not visible here.
 */
1837 scavenge_srt(const StgInfoTable *info)
1839 StgClosure **srt, **srt_end;
1841 /* evacuate the SRT. If srt_len is zero, then there isn't an
1842 * srt field in the info table. That's ok, because we'll
1843 * never dereference it.
1845 srt = (StgClosure **)(info->srt);
1846 srt_end = srt + info->srt_len;
1847 for (; srt < srt_end; srt++) {
1848 /* Special-case to handle references to closures hiding out in DLLs, since
1849 double indirections required to get at those. The code generator knows
1850 which is which when generating the SRT, so it stores the (indirect)
1851 reference to the DLL closure in the table by first adding one to it.
1852 We check for this here, and undo the addition before evacuating it.
1854 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1855 closure that's fixed at link-time, and no extra magic is required.
1857 #ifdef ENABLE_WIN32_DLL_SUPPORT
1858 if ( (unsigned long)(*srt) & 0x1 ) {
1859 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1869 /* -----------------------------------------------------------------------------
1871 -------------------------------------------------------------------------- */
/* scavengeTSO: evacuate every pointer held by the thread `tso`: the
 * link to the next TSO on its queue, the closure it is blocked on
 * (when the why_blocked state says block_info holds a closure), the
 * blocked-exceptions queue, and finally every pointer on its stack.
 */
1874 scavengeTSO (StgTSO *tso)
1876 /* chase the link field for any TSOs on the same queue */
1877 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1878 if ( tso->why_blocked == BlockedOnMVar
1879 || tso->why_blocked == BlockedOnBlackHole
1880 || tso->why_blocked == BlockedOnException
/* BlockedOnGA* states exist only in the parallel (GUM) build */
1882 || tso->why_blocked == BlockedOnGA
1883 || tso->why_blocked == BlockedOnGA_NoSend
1886 tso->block_info.closure = evacuate(tso->block_info.closure);
1888 if ( tso->blocked_exceptions != NULL ) {
1889 tso->blocked_exceptions =
1890 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1892 /* scavenge this thread's stack */
1893 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1896 /* -----------------------------------------------------------------------------
1897 Scavenge a given step until there are no more objects in this step
1900 evac_gen is set by the caller to be either zero (for a step in a
1901 generation < N) or G where G is the generation of the step being
1904 We sometimes temporarily change evac_gen back to zero if we're
1905 scavenging a mutable object where early promotion isn't such a good
1907 -------------------------------------------------------------------------- */
/* scavenge (interior -- the function header, presumably
 * `static void scavenge(step *stp)`, is not visible in this
 * line-sampled excerpt; TODO confirm against the full source):
 * standard breadth-first scan of a step's to-space between the scan
 * pointer and hp, evacuating every pointer inside each object.  When
 * some pointee could not be promoted to evac_gen (failed_to_evac),
 * the scanned object is recorded as mutable via mkMutCons so the
 * old-to-new pointer is remembered.
 */
1914 const StgInfoTable *info;
1916 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1921 failed_to_evac = rtsFalse;
1923 /* scavenge phase - standard breadth-first scavenging of the
1927 while (bd != stp->hp_bd || p < stp->hp) {
1929 /* If we're at the end of this block, move on to the next block */
1930 if (bd != stp->hp_bd && p == bd->free) {
1936 q = p; /* save ptr to object */
1938 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1939 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1941 info = get_itbl((StgClosure *)p);
1943 if (info->type==RBH)
1944 info = REVERT_INFOPTR(info);
1947 switch (info -> type) {
1950 /* treat MVars specially, because we don't want to evacuate the
1951 * mut_link field in the middle of the closure.
1954 StgMVar *mvar = ((StgMVar *)p);
1956 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1957 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1958 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1959 p += sizeofW(StgMVar);
1960 evac_gen = saved_evac_gen;
/* specialised small-closure cases: evacuate the pointer fields and
 * advance p by the closure's known size */
1968 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1969 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1970 p += sizeofW(StgHeader) + 2;
1975 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1976 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1982 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1983 p += sizeofW(StgHeader) + 1;
1988 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1994 p += sizeofW(StgHeader) + 1;
2001 p += sizeofW(StgHeader) + 2;
2008 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2009 p += sizeofW(StgHeader) + 2;
/* generic case: walk the pointer fields per the info-table layout */
2025 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2026 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2027 (StgClosure *)*p = evacuate((StgClosure *)*p);
2029 p += info->layout.payload.nptrs;
2034 if (stp->gen->no != 0) {
2035 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2038 case IND_OLDGEN_PERM:
2039 ((StgIndOldGen *)p)->indirectee =
2040 evacuate(((StgIndOldGen *)p)->indirectee);
2041 if (failed_to_evac) {
2042 failed_to_evac = rtsFalse;
2043 recordOldToNewPtrs((StgMutClosure *)p);
2045 p += sizeofW(StgIndOldGen);
2049 /* ignore MUT_CONSs */
2050 if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
2052 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2053 evac_gen = saved_evac_gen;
2055 p += sizeofW(StgMutVar);
2059 case SE_CAF_BLACKHOLE:
2062 p += BLACKHOLE_sizeW();
2067 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2068 (StgClosure *)bh->blocking_queue =
2069 evacuate((StgClosure *)bh->blocking_queue);
2070 if (failed_to_evac) {
2071 failed_to_evac = rtsFalse;
2072 recordMutable((StgMutClosure *)bh);
2074 p += BLACKHOLE_sizeW();
2078 case THUNK_SELECTOR:
2080 StgSelector *s = (StgSelector *)p;
2081 s->selectee = evacuate(s->selectee);
2082 p += THUNK_SELECTOR_sizeW();
2088 barf("scavenge:IND???\n");
2090 case CONSTR_INTLIKE:
2091 case CONSTR_CHARLIKE:
2093 case CONSTR_NOCAF_STATIC:
2097 /* Shouldn't see a static object here. */
2098 barf("scavenge: STATIC object\n");
2110 /* Shouldn't see stack frames here. */
2111 barf("scavenge: stack frame\n");
2113 case AP_UPD: /* same as PAPs */
2115 /* Treat a PAP just like a section of stack, not forgetting to
2116 * evacuate the function pointer too...
2119 StgPAP* pap = (StgPAP *)p;
2121 pap->fun = evacuate(pap->fun);
2122 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2123 p += pap_sizeW(pap);
2128 /* nothing to follow */
2129 p += arr_words_sizeW((StgArrWords *)p);
2133 /* follow everything */
2137 evac_gen = 0; /* repeatedly mutable */
2138 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2139 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2140 (StgClosure *)*p = evacuate((StgClosure *)*p);
2142 evac_gen = saved_evac_gen;
2146 case MUT_ARR_PTRS_FROZEN:
2147 /* follow everything */
2149 StgPtr start = p, next;
2151 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2152 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2153 (StgClosure *)*p = evacuate((StgClosure *)*p);
2155 if (failed_to_evac) {
2156 /* we can do this easier... */
2157 recordMutable((StgMutClosure *)start);
2158 failed_to_evac = rtsFalse;
2165 StgTSO *tso = (StgTSO *)p;
2168 evac_gen = saved_evac_gen;
2169 p += tso_sizeW(tso);
/* GranSim/GUM-only closure types below */
2174 case RBH: // cf. BLACKHOLE_BQ
2176 // nat size, ptrs, nonptrs, vhs;
2178 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2179 StgRBH *rbh = (StgRBH *)p;
2180 (StgClosure *)rbh->blocking_queue =
2181 evacuate((StgClosure *)rbh->blocking_queue);
2182 if (failed_to_evac) {
2183 failed_to_evac = rtsFalse;
2184 recordMutable((StgMutClosure *)rbh);
2187 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2188 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2189 // ToDo: use size of reverted closure here!
2190 p += BLACKHOLE_sizeW();
2196 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2197 /* follow the pointer to the node which is being demanded */
2198 (StgClosure *)bf->node =
2199 evacuate((StgClosure *)bf->node);
2200 /* follow the link to the rest of the blocking queue */
2201 (StgClosure *)bf->link =
2202 evacuate((StgClosure *)bf->link);
2203 if (failed_to_evac) {
2204 failed_to_evac = rtsFalse;
2205 recordMutable((StgMutClosure *)bf);
2208 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2209 bf, info_type((StgClosure *)bf),
2210 bf->node, info_type(bf->node)));
2211 p += sizeofW(StgBlockedFetch);
2219 p += sizeofW(StgFetchMe);
2220 break; // nothing to do in this case
2222 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2224 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2225 (StgClosure *)fmbq->blocking_queue =
2226 evacuate((StgClosure *)fmbq->blocking_queue);
2227 if (failed_to_evac) {
2228 failed_to_evac = rtsFalse;
2229 recordMutable((StgMutClosure *)fmbq);
2232 belch("@@ scavenge: %p (%s) exciting, isn't it",
2233 p, info_type((StgClosure *)p)));
2234 p += sizeofW(StgFetchMeBlockingQueue);
2240 barf("scavenge: unimplemented/strange closure type %d @ %p",
2244 barf("scavenge: unimplemented/strange closure type %d @ %p",
2248 /* If we didn't manage to promote all the objects pointed to by
2249 * the current object, then we have to designate this object as
2250 * mutable (because it contains old-to-new generation pointers).
2252 if (failed_to_evac) {
2253 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2254 failed_to_evac = rtsFalse;
2262 /* -----------------------------------------------------------------------------
2263 Scavenge one object.
2265 This is used for objects that are temporarily marked as mutable
2266 because they contain old-to-new generation pointers. Only certain
2267 objects can have this property.
2268 -------------------------------------------------------------------------- */
2269 //@cindex scavenge_one
/* scavenge_one: scavenge a single closure `p` -- used for objects that
 * were temporarily put on a mutable list because they may contain
 * old-to-new generation pointers.  Returns whether some pointee could
 * not be promoted (the saved failed_to_evac), in which case the caller
 * keeps the object on the mutable list.
 * NOTE(review): line-sampled excerpt -- several case labels and the
 * final return are not visible here.
 */
2272 scavenge_one(StgClosure *p)
2274 const StgInfoTable *info;
2277 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2278 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2283 if (info->type==RBH)
2284 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2287 switch (info -> type) {
2290 case FUN_1_0: /* hardly worth specialising these guys */
2310 case IND_OLDGEN_PERM:
/* generic pointers-first layout: evacuate each pointer field */
2314 end = (P_)p->payload + info->layout.payload.ptrs;
2315 for (q = (P_)p->payload; q < end; q++) {
2316 (StgClosure *)*q = evacuate((StgClosure *)*q);
2322 case SE_CAF_BLACKHOLE:
2327 case THUNK_SELECTOR:
2329 StgSelector *s = (StgSelector *)p;
2330 s->selectee = evacuate(s->selectee);
2334 case AP_UPD: /* same as PAPs */
2336 /* Treat a PAP just like a section of stack, not forgetting to
2337 * evacuate the function pointer too...
2340 StgPAP* pap = (StgPAP *)p;
2342 pap->fun = evacuate(pap->fun);
2343 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2348 /* This might happen if for instance a MUT_CONS was pointing to a
2349 * THUNK which has since been updated. The IND_OLDGEN will
2350 * be on the mutable list anyway, so we don't need to do anything
2356 barf("scavenge_one: strange object %d", (int)(info->type));
/* report-and-reset: hand the failure flag back to the caller */
2359 no_luck = failed_to_evac;
2360 failed_to_evac = rtsFalse;
2365 /* -----------------------------------------------------------------------------
2366 Scavenging mutable lists.
2368 We treat the mutable list of each generation > N (i.e. all the
2369 generations older than the one being collected) as roots. We also
2370 remove non-mutable objects from the mutable list at this point.
2371 -------------------------------------------------------------------------- */
2372 //@cindex scavenge_mut_once_list
/* scavenge_mut_once_list: walk a generation's mut_once_list (objects
 * that are mutated at most once, e.g. old-generation indirections and
 * MUT_CONS cells), evacuating what each entry points to.  Entries
 * whose pointees were fully promoted are dropped from the list; the
 * survivors are collected into new_list, which replaces
 * gen->mut_once_list at the end.
 * NOTE(review): line-sampled excerpt -- some case labels, the
 * new_list consing statements and braces are not visible here.
 */
2375 scavenge_mut_once_list(generation *gen)
2377 const StgInfoTable *info;
2378 StgMutClosure *p, *next, *new_list;
2380 p = gen->mut_once_list;
2381 new_list = END_MUT_LIST;
2385 failed_to_evac = rtsFalse;
2387 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2389 /* make sure the info pointer is into text space */
2390 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2391 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2395 if (info->type==RBH)
2396 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2398 switch(info->type) {
2401 case IND_OLDGEN_PERM:
2403 /* Try to pull the indirectee into this generation, so we can
2404 * remove the indirection from the mutable list.
2406 ((StgIndOldGen *)p)->indirectee =
2407 evacuate(((StgIndOldGen *)p)->indirectee);
/* debug-only instrumentation: scavenge immediately and report how
 * much was evacuated on behalf of this indirection */
2410 if (RtsFlags.DebugFlags.gc)
2411 /* Debugging code to print out the size of the thing we just
2415 StgPtr start = gen->steps[0].scan;
2416 bdescr *start_bd = gen->steps[0].scan_bd;
2418 scavenge(&gen->steps[0]);
2419 if (start_bd != gen->steps[0].scan_bd) {
2420 size += (P_)BLOCK_ROUND_UP(start) - start;
2421 start_bd = start_bd->link;
2422 while (start_bd != gen->steps[0].scan_bd) {
2423 size += BLOCK_SIZE_W;
2424 start_bd = start_bd->link;
2426 size += gen->steps[0].scan -
2427 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2429 size = gen->steps[0].scan - start;
2431 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2435 /* failed_to_evac might happen if we've got more than two
2436 * generations, we're collecting only generation 0, the
2437 * indirection resides in generation 2 and the indirectee is
2440 if (failed_to_evac) {
2441 failed_to_evac = rtsFalse;
2442 p->mut_link = new_list;
2445 /* the mut_link field of an IND_STATIC is overloaded as the
2446 * static link field too (it just so happens that we don't need
2447 * both at the same time), so we need to NULL it out when
2448 * removing this object from the mutable list because the static
2449 * link fields are all assumed to be NULL before doing a major
2457 /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
2458 * it from the mutable list if possible by promoting whatever it
2461 ASSERT(p->header.info == &stg_MUT_CONS_info);
2462 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2463 /* didn't manage to promote everything, so put the
2464 * MUT_CONS back on the list.
2466 p->mut_link = new_list;
2472 /* shouldn't have anything else on the mutables list */
2473 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2477 gen->mut_once_list = new_list;
2480 //@cindex scavenge_mutable_list
/* scavenge_mutable_list: treat a generation's saved mutable list
 * (gen->saved_mut_list) as roots.  Each entry's pointees are
 * evacuated; objects that remain mutable (MUT_VAR, MVAR, mutable
 * arrays, TSOs, blocking queues) are re-threaded onto gen->mut_list,
 * while frozen arrays that were fully promoted are dropped.
 * NOTE(review): line-sampled excerpt -- some case labels, braces and
 * list-consing statements are not visible here.
 */
2483 scavenge_mutable_list(generation *gen)
2485 const StgInfoTable *info;
2486 StgMutClosure *p, *next;
2488 p = gen->saved_mut_list;
2492 failed_to_evac = rtsFalse;
2494 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2496 /* make sure the info pointer is into text space */
2497 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2498 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2502 if (info->type==RBH)
2503 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2505 switch(info->type) {
2507 case MUT_ARR_PTRS_FROZEN:
2508 /* remove this guy from the mutable list, but follow the ptrs
2509 * anyway (and make sure they get promoted to this gen).
2514 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2516 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2517 (StgClosure *)*q = evacuate((StgClosure *)*q);
2521 if (failed_to_evac) {
2522 failed_to_evac = rtsFalse;
2523 p->mut_link = gen->mut_list;
2530 /* follow everything */
2531 p->mut_link = gen->mut_list;
2536 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2537 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2538 (StgClosure *)*q = evacuate((StgClosure *)*q);
2544 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2545 * it from the mutable list if possible by promoting whatever it
2548 ASSERT(p->header.info != &stg_MUT_CONS_info);
2549 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2550 p->mut_link = gen->mut_list;
2556 StgMVar *mvar = (StgMVar *)p;
2557 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2558 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2559 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2560 p->mut_link = gen->mut_list;
2567 StgTSO *tso = (StgTSO *)p;
2571 /* Don't take this TSO off the mutable list - it might still
2572 * point to some younger objects (because we set evac_gen to 0
2575 tso->mut_link = gen->mut_list;
2576 gen->mut_list = (StgMutClosure *)tso;
2582 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2583 (StgClosure *)bh->blocking_queue =
2584 evacuate((StgClosure *)bh->blocking_queue);
2585 p->mut_link = gen->mut_list;
2590 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2593 case IND_OLDGEN_PERM:
2594 /* Try to pull the indirectee into this generation, so we can
2595 * remove the indirection from the mutable list.
2598 ((StgIndOldGen *)p)->indirectee =
2599 evacuate(((StgIndOldGen *)p)->indirectee);
2602 if (failed_to_evac) {
2603 failed_to_evac = rtsFalse;
2604 p->mut_link = gen->mut_once_list;
2605 gen->mut_once_list = p;
2612 // HWL: check whether all of these are necessary
/* GranSim/GUM-only closure types below */
2614 case RBH: // cf. BLACKHOLE_BQ
2616 // nat size, ptrs, nonptrs, vhs;
2618 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2619 StgRBH *rbh = (StgRBH *)p;
2620 (StgClosure *)rbh->blocking_queue =
2621 evacuate((StgClosure *)rbh->blocking_queue);
2622 if (failed_to_evac) {
2623 failed_to_evac = rtsFalse;
2624 recordMutable((StgMutClosure *)rbh);
2626 // ToDo: use size of reverted closure here!
2627 p += BLACKHOLE_sizeW();
2633 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2634 /* follow the pointer to the node which is being demanded */
2635 (StgClosure *)bf->node =
2636 evacuate((StgClosure *)bf->node);
2637 /* follow the link to the rest of the blocking queue */
2638 (StgClosure *)bf->link =
2639 evacuate((StgClosure *)bf->link);
2640 if (failed_to_evac) {
2641 failed_to_evac = rtsFalse;
2642 recordMutable((StgMutClosure *)bf);
2644 p += sizeofW(StgBlockedFetch);
2650 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
2653 p += sizeofW(StgFetchMe);
2654 break; // nothing to do in this case
2656 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2658 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2659 (StgClosure *)fmbq->blocking_queue =
2660 evacuate((StgClosure *)fmbq->blocking_queue);
2661 if (failed_to_evac) {
2662 failed_to_evac = rtsFalse;
2663 recordMutable((StgMutClosure *)fmbq);
2665 p += sizeofW(StgFetchMeBlockingQueue);
2671 /* shouldn't have anything else on the mutables list */
2672 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
2677 //@cindex scavenge_static
/* scavenge_static: scavenge the global static_objects list, moving
 * each object onto scavenged_static_objects as it is processed (the
 * static link field serves as the list link).  Static objects are
 * always evacuated to the oldest generation.  The list may grow while
 * we walk it, because scavenging can evacuate further static objects.
 * NOTE(review): line-sampled excerpt -- the loop tail that re-reads
 * static_objects, and some case labels, are not visible here.
 */
2680 scavenge_static(void)
2682 StgClosure* p = static_objects;
2683 const StgInfoTable *info;
2685 /* Always evacuate straight to the oldest generation for static
2687 evac_gen = oldest_gen->no;
2689 /* keep going until we've scavenged all the objects on the linked
2691 while (p != END_OF_STATIC_LIST) {
2695 if (info->type==RBH)
2696 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2698 /* make sure the info pointer is into text space */
2699 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2700 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2702 /* Take this object *off* the static_objects list,
2703 * and put it on the scavenged_static_objects list.
2705 static_objects = STATIC_LINK(info,p);
2706 STATIC_LINK(info,p) = scavenged_static_objects;
2707 scavenged_static_objects = p;
2709 switch (info -> type) {
2713 StgInd *ind = (StgInd *)p;
2714 ind->indirectee = evacuate(ind->indirectee);
2716 /* might fail to evacuate it, in which case we have to pop it
2717 * back on the mutable list (and take it off the
2718 * scavenged_static list because the static link and mut link
2719 * pointers are one and the same).
2721 if (failed_to_evac) {
2722 failed_to_evac = rtsFalse;
2723 scavenged_static_objects = STATIC_LINK(info,p);
2724 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2725 oldest_gen->mut_once_list = (StgMutClosure *)ind;
/* generic static constructor/function: evacuate its pointer fields */
2739 next = (P_)p->payload + info->layout.payload.ptrs;
2740 /* evacuate the pointers */
2741 for (q = (P_)p->payload; q < next; q++) {
2742 (StgClosure *)*q = evacuate((StgClosure *)*q);
2748 barf("scavenge_static: strange closure %d", (int)(info->type));
/* evacuating to the oldest generation can never fail */
2751 ASSERT(failed_to_evac == rtsFalse);
2753 /* get the next static object from the list. Remember, there might
2754 * be more stuff on this list now that we've done some evacuating!
2755 * (static_objects is a global)
2761 /* -----------------------------------------------------------------------------
2762 scavenge_stack walks over a section of stack and evacuates all the
2763 objects pointed to by it. We can use the same code for walking
2764 PAPs, since these are just sections of copied stack.
2765 -------------------------------------------------------------------------- */
2766 //@cindex scavenge_stack
/* scavenge_stack: walk a section of TSO stack (or a PAP/AP_UPD
 * payload, which has the same layout) between `p` and `stack_end`,
 * evacuating every stack word that is a closure pointer.  Pending
 * argument sections are skipped via their tags; activation records
 * are decoded with their info-table bitmaps.  Update frames are
 * special-cased so that their updatees (blackholes) are not promoted.
 * NOTE(review): line-sampled excerpt -- several branches, the
 * bitmap-advance statements and braces are not visible here.
 */
2769 scavenge_stack(StgPtr p, StgPtr stack_end)
2772 const StgInfoTable* info;
2775 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
2778 * Each time around this loop, we are looking at a chunk of stack
2779 * that starts with either a pending argument section or an
2780 * activation record.
2783 while (p < stack_end) {
2786 /* If we've got a tag, skip over that many words on the stack */
2787 if (IS_ARG_TAG((W_)q)) {
2792 /* Is q a pointer to a closure?
2794 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2796 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2797 ASSERT(closure_STATIC((StgClosure *)q));
2799 /* otherwise, must be a pointer into the allocation space. */
2802 (StgClosure *)*p = evacuate((StgClosure *)q);
2808 * Otherwise, q must be the info pointer of an activation
2809 * record. All activation records have 'bitmap' style layout
2812 info = get_itbl((StgClosure *)p);
2814 switch (info->type) {
2816 /* Dynamic bitmap: the mask is stored on the stack */
2818 bitmap = ((StgRetDyn *)p)->liveness;
2819 p = (P_)&((StgRetDyn *)p)->payload[0];
2822 /* probably a slow-entry point return address: */
2830 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
2831 old_p, p, old_p+1));
2833 p++; /* what if FHS!=1 !? -- HWL */
2838 /* Specialised code for update frames, since they're so common.
2839 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2840 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2844 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2846 nat type = get_itbl(frame->updatee)->type;
2848 p += sizeofW(StgUpdateFrame);
2849 if (type == EVACUATED) {
2850 frame->updatee = evacuate(frame->updatee);
2853 bdescr *bd = Bdescr((P_)frame->updatee);
2855 if (bd->gen->no > N) {
2856 if (bd->gen->no < evac_gen) {
2857 failed_to_evac = rtsTrue;
2862 /* Don't promote blackholes */
2864 if (!(stp->gen->no == 0 &&
2866 stp->no == stp->gen->n_steps-1)) {
/* copy only the header of the blackhole; the rest is slop */
2873 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2874 sizeofW(StgHeader), stp);
2875 frame->updatee = to;
2878 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
2879 frame->updatee = to;
2880 recordMutable((StgMutClosure *)to);
2883 /* will never be SE_{,CAF_}BLACKHOLE, since we
2884 don't push an update frame for single-entry thunks. KSW 1999-01. */
2885 barf("scavenge_stack: UPDATE_FRAME updatee");
2890 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2897 bitmap = info->layout.bitmap;
2899 /* this assumes that the payload starts immediately after the info-ptr */
/* bit clear => the word is a live pointer, evacuate it */
2901 while (bitmap != 0) {
2902 if ((bitmap & 1) == 0) {
2903 (StgClosure *)*p = evacuate((StgClosure *)*p);
2906 bitmap = bitmap >> 1;
2913 /* large bitmap (> 32 entries) */
2918 StgLargeBitmap *large_bitmap;
2921 large_bitmap = info->layout.large_bitmap;
2924 for (i=0; i<large_bitmap->size; i++) {
2925 bitmap = large_bitmap->bitmap[i];
2926 q = p + sizeof(W_) * 8;
2927 while (bitmap != 0) {
2928 if ((bitmap & 1) == 0) {
2929 (StgClosure *)*p = evacuate((StgClosure *)*p);
2932 bitmap = bitmap >> 1;
2934 if (i+1 < large_bitmap->size) {
2936 (StgClosure *)*p = evacuate((StgClosure *)*p);
2942 /* and don't forget to follow the SRT */
2947 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
2952 /*-----------------------------------------------------------------------------
2953 scavenge the large object list.
2955 evac_gen set by caller; similar games played with evac_gen as with
2956 scavenge() - see comment at the top of scavenge(). Most large
2957 objects are (repeatedly) mutable, so most of the time evac_gen will
2959 --------------------------------------------------------------------------- */
2960 //@cindex scavenge_large
/* Scavenge the large-object list of a step.  new_large_objects is
 * treated as a stack: objects are taken off the front, moved onto
 * scavenged_large_objects, and scavenged; evacuation may push new
 * entries onto the front, which this loop then picks up.
 * NOTE(review): excerpt — some case labels and braces are elided.
 */
2963 scavenge_large(step *stp)
2967 const StgInfoTable* info;
2968 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2970 evac_gen = 0; /* most objects are mutable */
2971 bd = stp->new_large_objects;
/* Re-read the list head each iteration: evacuate() may have pushed
 * new large objects onto the front of new_large_objects. */
2973 for (; bd != NULL; bd = stp->new_large_objects) {
2975 /* take this object *off* the large objects list and put it on
2976 * the scavenged large objects list. This is so that we can
2977 * treat new_large_objects as a stack and push new objects on
2978 * the front when evacuating.
2980 stp->new_large_objects = bd->link;
2981 dbl_link_onto(bd, &stp->scavenged_large_objects);
2984 info = get_itbl((StgClosure *)p);
2986 switch (info->type) {
2988 /* only certain objects can be "large"... */
2991 /* nothing to follow */
2995 /* follow everything */
2999 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3000 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3001 (StgClosure *)*p = evacuate((StgClosure *)*p);
3006 case MUT_ARR_PTRS_FROZEN:
3007 /* follow everything */
3009 StgPtr start = p, next;
/* Frozen arrays are immutable, so scavenge at the caller's
 * evac_gen rather than generation 0. */
3011 evac_gen = saved_evac_gen; /* not really mutable */
3012 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3013 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3014 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* If anything couldn't be promoted, keep the array on the
 * mutable list so it gets rescanned next GC. */
3017 if (failed_to_evac) {
3018 recordMutable((StgMutClosure *)start);
3024 scavengeTSO((StgTSO *)p);
3030 StgPAP* pap = (StgPAP *)p;
3032 evac_gen = saved_evac_gen; /* not really mutable */
3033 pap->fun = evacuate(pap->fun);
/* A PAP's payload is a copied chunk of stack, so reuse the
 * stack scavenger on it. */
3034 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
3040 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
3045 //@cindex zero_static_object_list
/* Walk the static-object list starting at first_static and zero each
 * object's static link field, un-marking it (the GC marks membership
 * of the list with a non-zero link; see the notes at the top of the
 * file).  The next link is saved before it is overwritten.
 */
3048 zero_static_object_list(StgClosure* first_static)
3052 const StgInfoTable *info;
3054 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3056 link = STATIC_LINK(info, p);
3057 STATIC_LINK(info,p) = NULL;
3061 /* This function is only needed because we share the mutable link
3062 * field with the static link field in an IND_STATIC, so we have to
3063 * zero the mut_link field before doing a major GC, which needs the
3064 * static link field.
3066 * It doesn't do any harm to zero all the mutable link fields on the
/* Zero the mut_link field of every closure on the given mutable list
 * (see the comment above: IND_STATIC shares this field with the static
 * link field, which a major GC needs).  The next pointer is captured
 * before the link is cleared in the (elided) loop body.
 */
3071 zero_mutable_list( StgMutClosure *first )
3073 StgMutClosure *next, *c;
3075 for (c = first; c != END_MUT_LIST; c = next) {
3081 /* -----------------------------------------------------------------------------
3083 -------------------------------------------------------------------------- */
/* NOTE(review): the enclosing function header is elided from this
 * excerpt — presumably this is the CAF-reverting routine (cf. the
 * "ReverCAFs prototype" include comment).  It walks caf_list and
 * restores each CAF's original info table from saved_info, undoing
 * the CAF_BLACKHOLE-ing done at entry.  TODO confirm against the
 * full source.
 */
3090 for (c = (StgIndStatic *)caf_list; c != NULL;
3091 c = (StgIndStatic *)c->static_link)
3093 c->header.info = c->saved_info;
3094 c->saved_info = NULL;
3095 /* could, but not necessary: c->static_link = NULL; */
/* Treat every CAF on caf_list as a root: evacuate each CAF's
 * indirectee so the values CAFs point to survive the collection.
 */
3101 scavengeCAFs( void )
3106 for (c = (StgIndStatic *)caf_list; c != NULL;
3107 c = (StgIndStatic *)c->static_link)
3109 c->indirectee = evacuate(c->indirectee);
3113 /* -----------------------------------------------------------------------------
3114 Sanity code for CAF garbage collection.
3116 With DEBUG turned on, we manage a CAF list in addition to the SRT
3117 mechanism. After GC, we run down the CAF list and blackhole any
3118 CAFs which have been garbage collected. This means we get an error
3119 whenever the program tries to enter a garbage collected CAF.
3121 Any garbage collected CAFs are taken off the CAF list at the same
3123 -------------------------------------------------------------------------- */
/* NOTE(review): the enclosing function header is elided — this is the
 * debug-only CAF sanity sweep described in the comment above
 * (presumably gcCAFs).  A CAF whose static link is NULL was not
 * reached this GC: blackhole it (so entering it errors out) and
 * unlink it from the CAF list; live CAFs advance the link cursor.
 * TODO confirm function name against the full source.
 */
3133 const StgInfoTable *info;
3144 ASSERT(info->type == IND_STATIC);
3146 if (STATIC_LINK(info,p) == NULL) {
3147 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
/* Dead CAF: blackhole it and splice it out of the list. */
3149 SET_INFO(p,&stg_BLACKHOLE_info);
3150 p = STATIC_LINK2(info,p);
/* Live CAF: keep it, move the cursor to its link field. */
3154 pp = &STATIC_LINK2(info,p);
3161 /* fprintf(stderr, "%d CAFs live\n", i); */
3165 //@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
3166 //@subsection Lazy black holing
3168 /* -----------------------------------------------------------------------------
3171 Whenever a thread returns to the scheduler after possibly doing
3172 some work, we have to run down the stack and black-hole all the
3173 closures referred to by update frames.
3174 -------------------------------------------------------------------------- */
3175 //@cindex threadLazyBlackHole
/* Lazy black-holing: walk the chain of update frames on tso's stack
 * (via tso->su and the frame links) and blackhole every updatee that
 * is not already some flavour of blackhole.  Stops early on finding a
 * plain BLACKHOLE, since everything below it was already done on a
 * previous pause.
 * NOTE(review): excerpt — the case labels and loop braces are elided.
 */
3178 threadLazyBlackHole(StgTSO *tso)
3180 StgUpdateFrame *update_frame;
3181 StgBlockingQueue *bh;
3184 stack_end = &tso->stack[tso->stack_size];
3185 update_frame = tso->su;
3188 switch (get_itbl(update_frame)->type) {
/* CATCH_FRAMEs have no updatee; just follow the link. */
3191 update_frame = ((StgCatchFrame *)update_frame)->link;
3195 bh = (StgBlockingQueue *)update_frame->updatee;
3197 /* if the thunk is already blackholed, it means we've also
3198 * already blackholed the rest of the thunks on this stack,
3199 * so we can stop early.
3201 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3202 * don't interfere with this optimisation.
3204 if (bh->header.info == &stg_BLACKHOLE_info) {
/* Don't overwrite BLACKHOLE_BQ (would lose the blocking queue)
 * or CAF_BLACKHOLE; anything else becomes a plain BLACKHOLE. */
3208 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3209 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3210 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3211 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3213 SET_INFO(bh,&stg_BLACKHOLE_info);
3216 update_frame = update_frame->link;
/* SEQ_FRAMEs likewise have no updatee; follow the link. */
3220 update_frame = ((StgSeqFrame *)update_frame)->link;
3226 barf("threadPaused");
3231 //@node Stack squeezing, Pausing a thread, Lazy black holing
3232 //@subsection Stack squeezing
3234 /* -----------------------------------------------------------------------------
3237 * Code largely pinched from old RTS, then hacked to bits. We also do
3238 * lazy black holing here.
3240 * -------------------------------------------------------------------------- */
3241 //@cindex threadSqueezeStack
/* Squeeze out redundant adjacent update frames on tso's stack (they
 * arise from black-hole-style loops), sliding the intervening stack
 * words up by the accumulated displacement, and do lazy black-holing
 * of the surviving updatees at the same time.  Two passes: walk down
 * reversing the frame links, then walk back up squeezing/sliding.
 * NOTE(review): excerpt — many lines (braces, else branches, debug
 * counters) are elided.
 */
3244 threadSqueezeStack(StgTSO *tso)
3246 lnat displacement = 0;
3247 StgUpdateFrame *frame;
3248 StgUpdateFrame *next_frame; /* Temporally next */
3249 StgUpdateFrame *prev_frame; /* Temporally previous */
3251 rtsBool prev_was_update_frame;
3253 StgUpdateFrame *top_frame;
3254 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3256 void printObj( StgClosure *obj ); // from Printer.c
3258 top_frame = tso->su;
3261 bottom = &(tso->stack[tso->stack_size]);
3264 /* There must be at least one frame, namely the STOP_FRAME.
3266 ASSERT((P_)frame < bottom);
3268 /* Walk down the stack, reversing the links between frames so that
3269 * we can walk back up as we squeeze from the bottom. Note that
3270 * next_frame and prev_frame refer to next and previous as they were
3271 * added to the stack, rather than the way we see them in this
3272 * walk. (It makes the next loop less confusing.)
3274 * Stop if we find an update frame pointing to a black hole
3275 * (see comment in threadLazyBlackHole()).
3279 /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
3280 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3281 prev_frame = frame->link;
3282 frame->link = next_frame;
/* Sanity: every frame pointer must lie within the stack. */
3287 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3288 printObj((StgClosure *)prev_frame);
3289 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3292 switch (get_itbl(frame)->type) {
3295 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3308 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3310 printObj((StgClosure *)prev_frame);
3313 if (get_itbl(frame)->type == UPDATE_FRAME
3314 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3319 /* Now, we're at the bottom. Frame points to the lowest update
3320 * frame on the stack, and its link actually points to the frame
3321 * above. We have to walk back up the stack, squeezing out empty
3322 * update frames and turning the pointers back around on the way
3325 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3326 * we never want to eliminate it anyway. Just walk one step up
3327 * before starting to squeeze. When you get to the topmost frame,
3328 * remember that there are still some words above it that might have
3335 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3338 * Loop through all of the frames (everything except the very
3339 * bottom). Things are complicated by the fact that we have
3340 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3341 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3343 while (frame != NULL) {
3345 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3346 rtsBool is_update_frame;
3348 next_frame = frame->link;
3349 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
/* Squeeze condition: */
3352 * 1. both the previous and current frame are update frames
3353 * 2. the current frame is empty
3355 if (prev_was_update_frame && is_update_frame &&
3356 (P_)prev_frame == frame_bottom + displacement) {
3358 /* Now squeeze out the current frame */
3359 StgClosure *updatee_keep = prev_frame->updatee;
3360 StgClosure *updatee_bypass = frame->updatee;
3363 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3367 /* Deal with blocking queues. If both updatees have blocked
3368 * threads, then we should merge the queues into the update
3369 * frame that we're keeping.
3371 * Alternatively, we could just wake them up: they'll just go
3372 * straight to sleep on the proper blackhole! This is less code
3373 * and probably less bug prone, although it's probably much
3376 #if 0 /* do it properly... */
3377 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3378 # error Unimplemented lazy BH warning. (KSW 1999-01)
3380 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3381 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3383 /* Sigh. It has one. Don't lose those threads! */
3384 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3385 /* Urgh. Two queues. Merge them. */
3386 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3388 while (keep_tso->link != END_TSO_QUEUE) {
3389 keep_tso = keep_tso->link;
3391 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3394 /* For simplicity, just swap the BQ for the BH */
3395 P_ temp = updatee_keep;
3397 updatee_keep = updatee_bypass;
3398 updatee_bypass = temp;
3400 /* Record the swap in the kept frame (below) */
3401 prev_frame->updatee = updatee_keep;
3406 TICK_UPD_SQUEEZED();
3407 /* wasn't there something about update squeezing and ticky to be
3408 * sorted out? oh yes: we aren't counting each enter properly
3409 * in this case. See the log somewhere. KSW 1999-04-21
3411 * Check two things: that the two update frames don't point to
3412 * the same object, and that the updatee_bypass isn't already an
3413 * indirection. Both of these cases only happen when we're in a
3414 * black hole-style loop (and there are multiple update frames
3415 * on the stack pointing to the same closure), but they can both
3416 * screw us up if we don't check.
3418 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3419 /* this wakes the threads up */
3420 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3423 sp = (P_)frame - 1; /* sp = stuff to slide */
3424 displacement += sizeofW(StgUpdateFrame);
3427 /* No squeeze for this frame */
3428 sp = frame_bottom - 1; /* Keep the current frame */
3430 /* Do lazy black-holing.
3432 if (is_update_frame) {
3433 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
/* Same rule as threadLazyBlackHole: don't clobber existing
 * BLACKHOLE / BLACKHOLE_BQ / CAF_BLACKHOLE objects. */
3434 if (bh->header.info != &stg_BLACKHOLE_info &&
3435 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3436 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3437 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3438 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3441 /* zero out the slop so that the sanity checker can tell
3442 * where the next closure is.
3445 StgInfoTable *info = get_itbl(bh);
3446 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3447 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3448 * info is used for a different purpose, and it's exactly the
3449 * same size as a BLACKHOLE in any case.
3451 if (info->type != THUNK_SELECTOR) {
3452 for (i = np; i < np + nw; i++) {
3453 ((StgClosure *)bh)->payload[i] = 0;
3458 SET_INFO(bh,&stg_BLACKHOLE_info);
3462 /* Fix the link in the current frame (should point to the frame below) */
3463 frame->link = prev_frame;
3464 prev_was_update_frame = is_update_frame;
3467 /* Now slide all words from sp up to the next frame */
3469 if (displacement > 0) {
3470 P_ next_frame_bottom;
3472 if (next_frame != NULL)
3473 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3475 next_frame_bottom = tso->sp - 1;
3479 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* Copy words upward (towards higher addresses) by displacement. */
3483 while (sp >= next_frame_bottom) {
3484 sp[displacement] = *sp;
3488 (P_)prev_frame = (P_)frame + displacement;
/* Commit the new stack pointer and topmost update frame. */
3492 tso->sp += displacement;
3493 tso->su = prev_frame;
3496 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
3497 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3501 //@node Pausing a thread, Index, Stack squeezing
3502 //@subsection Pausing a thread
3504 /* -----------------------------------------------------------------------------
3507 * We have to prepare for GC - this means doing lazy black holing
3508 * here. We also take the opportunity to do stack squeezing if it's
3510 * -------------------------------------------------------------------------- */
3511 //@cindex threadPaused
/* Called when a thread returns to the scheduler: prepare its stack
 * for possible GC.  Stack squeezing (which also blackholes) when the
 * squeezeUpdFrames flag is on; otherwise just lazy black-holing
 * (the 'else' line is elided from this excerpt).
 */
3513 threadPaused(StgTSO *tso)
3515 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3516 threadSqueezeStack(tso); /* does black holing too */
3518 threadLazyBlackHole(tso);
3521 /* -----------------------------------------------------------------------------
3523 * -------------------------------------------------------------------------- */
3526 //@cindex printMutOnceList
/* Debug helper: print every closure on a generation's mut_once_list
 * to stderr, as "address (closure-type), " pairs on one line.
 * NOTE(review): 'next' is presumably initialized from p->mut_link on
 * an elided line before the loop — confirm against the full source.
 */
3528 printMutOnceList(generation *gen)
3530 StgMutClosure *p, *next;
3532 p = gen->mut_once_list;
3535 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3536 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3537 fprintf(stderr, "%p (%s), ",
3538 p, info_type((StgClosure *)p));
3540 fputc('\n', stderr);
3543 //@cindex printMutableList
/* Debug helper: print every closure on a generation's mut_list to
 * stderr, in the same format as printMutOnceList.
 * NOTE(review): 'p'/'next' initialization lines are elided from this
 * excerpt — confirm against the full source.
 */
3545 printMutableList(generation *gen)
3547 StgMutClosure *p, *next;
3552 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3553 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3554 fprintf(stderr, "%p (%s), ",
3555 p, info_type((StgClosure *)p));
3557 fputc('\n', stderr);
3560 //@cindex maybeLarge
/* Return rtsTrue if this closure's type is one that can appear on a
 * step's new_large_objects list (the set handled by scavenge_large):
 * MUT_ARR_PTRS, MUT_ARR_PTRS_FROZEN, TSO, or ARR_WORDS.
 */
static inline rtsBool
3562 maybeLarge(StgClosure *closure)
3564 StgInfoTable *info = get_itbl(closure);
3566 /* closure types that may be found on the new_large_objects list;
3567 see scavenge_large */
3568 return (info->type == MUT_ARR_PTRS ||
3569 info->type == MUT_ARR_PTRS_FROZEN ||
3570 info->type == TSO ||
3571 info->type == ARR_WORDS);
3577 //@node Index, , Pausing a thread
3581 //* GarbageCollect:: @cindex\s-+GarbageCollect
3582 //* MarkRoot:: @cindex\s-+MarkRoot
3583 //* RevertCAFs:: @cindex\s-+RevertCAFs
3584 //* addBlock:: @cindex\s-+addBlock
3585 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3586 //* copy:: @cindex\s-+copy
3587 //* copyPart:: @cindex\s-+copyPart
3588 //* evacuate:: @cindex\s-+evacuate
3589 //* evacuate_large:: @cindex\s-+evacuate_large
3590 //* gcCAFs:: @cindex\s-+gcCAFs
3591 //* isAlive:: @cindex\s-+isAlive
3592 //* maybeLarge:: @cindex\s-+maybeLarge
3593 //* mkMutCons:: @cindex\s-+mkMutCons
3594 //* printMutOnceList:: @cindex\s-+printMutOnceList
3595 //* printMutableList:: @cindex\s-+printMutableList
3596 //* relocate_TSO:: @cindex\s-+relocate_TSO
3597 //* scavenge:: @cindex\s-+scavenge
3598 //* scavenge_large:: @cindex\s-+scavenge_large
3599 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3600 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3601 //* scavenge_one:: @cindex\s-+scavenge_one
3602 //* scavenge_srt:: @cindex\s-+scavenge_srt
3603 //* scavenge_stack:: @cindex\s-+scavenge_stack
3604 //* scavenge_static:: @cindex\s-+scavenge_static
3605 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3606 //* threadPaused:: @cindex\s-+threadPaused
3607 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3608 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3609 //* upd_evacuee:: @cindex\s-+upd_evacuee
3610 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
3611 //* zero_static_object_list:: @cindex\s-+zero_static_object_list