1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.96 2001/02/11 17:51:07 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
//@menu
//* Includes::
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Garbage Collect::
//* Weak Pointers::
//* Evacuation::
//* Scavenging::
//* Reverting CAFs::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::
//@end menu
26 //@node Includes, STATIC OBJECT LIST
27 //@subsection Includes
33 #include "StoragePriv.h"
36 #include "SchedAPI.h" /* for ReverCAFs prototype */
39 #include "BlockAlloc.h"
45 #include "StablePriv.h"
47 #if defined(GRAN) || defined(PAR)
48 # include "GranSimRts.h"
49 # include "ParallelRts.h"
53 # include "ParallelDebug.h"
58 #if defined(RTS_GTK_FRONTPANEL)
59 #include "FrontPanel.h"
62 //@node STATIC OBJECT LIST, Static function declarations, Includes
63 //@subsection STATIC OBJECT LIST
65 /* STATIC OBJECT LIST.
68 * We maintain a linked list of static objects that are still live.
69 * The requirements for this list are:
71 * - we need to scan the list while adding to it, in order to
72 * scavenge all the static objects (in the same way that
73 * breadth-first scavenging works for dynamic objects).
75 * - we need to be able to tell whether an object is already on
76 * the list, to break loops.
78 * Each static object has a "static link field", which we use for
79 * linking objects on to the list. We use a stack-type list, consing
80 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
84 * A separate list is kept for objects that have been scavenged
85 * already - this is so that we can zero all the marks afterwards.
87 * An object is on the list if its static link field is non-zero; this
88 * means that we have to mark the end of the list with '1', not NULL.
90 * Extra notes for generational GC:
92 * Each generation has a static object list associated with it. When
93 * collecting generations up to N, we treat the static object lists
94 * from generations > N as roots.
96 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
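
/* An illustrative sketch (not part of the collector) of the consing
 * discipline described above, using the STATIC_LINK macro from the
 * storage manager headers: an object is pushed only if its link field
 * is still NULL, which doubles as the membership test that breaks
 * loops (cf. the STATIC cases in evacuate() below).
 */
#if 0
static void push_static_sketch( const StgInfoTable *info, StgClosure *q )
{
  if (STATIC_LINK(info,q) == NULL) {   /* not yet on the list */
    STATIC_LINK(info,q) = static_objects;
    static_objects = q;                /* cons onto the front */
  }
}
#endif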
99 StgClosure* static_objects; /* live static objects */
100 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
102 /* N is the oldest generation being collected, where the generations
103 * are numbered starting at 0. A major GC (indicated by the major_gc
104 * flag) is when we're collecting all generations. We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
110 /* Youngest generation that objects should be evacuated to in
111 * evacuate(). (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
118 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
119 static rtsBool weak_done; /* all done for this pass */
/* List of all threads during GC
 */
123 static StgTSO *old_all_threads;
124 static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
129 static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
133 bdescr *old_to_space;
/* Data used for allocation area sizing.
 */
137 lnat new_blocks; /* blocks allocated during this GC */
138 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
140 //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
141 //@subsection Static function declarations
143 /* -----------------------------------------------------------------------------
144 Static function declarations
145 -------------------------------------------------------------------------- */
147 static StgClosure * evacuate ( StgClosure *q );
148 static void zero_static_object_list ( StgClosure* first_static );
149 static void zero_mutable_list ( StgMutClosure *first );
151 static rtsBool traverse_weak_ptr_list ( void );
152 static void cleanup_weak_ptr_list ( StgWeak **list );
154 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
155 static void scavenge_large ( step * );
156 static void scavenge ( step * );
157 static void scavenge_static ( void );
158 static void scavenge_mutable_list ( generation *g );
159 static void scavenge_mut_once_list ( generation *g );
162 static void gcCAFs ( void );
165 void revertCAFs ( void );
166 void scavengeCAFs ( void );
168 //@node Garbage Collect, Weak Pointers, Static function declarations
169 //@subsection Garbage Collect
171 /* -----------------------------------------------------------------------------
174 For garbage collecting generation N (and all younger generations):
176 - follow all pointers in the root set. the root set includes all
177 mutable objects in all steps in all generations.
179 - for each pointer, evacuate the object it points to into either
180 + to-space in the next higher step in that generation, if one exists,
181 + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
184 + if the object's generation < N, then evacuate it to to-space
185 in the next generation.
187 - repeatedly scavenge to-space from each step in each generation
188 being collected until no more objects can be evacuated.
190 - free from-space in each step, and set from-space = to-space.
192 -------------------------------------------------------------------------- */
193 //@cindex GarbageCollect
195 void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
199 lnat live, allocated, collected = 0, copied = 0;
203 CostCentreStack *prev_CCS;
206 #if defined(DEBUG) && defined(GRAN)
207 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
211 /* tell the stats department that we've started a GC */
214 /* attribute any costs to CCS_GC */
220 /* Approximate how much we allocated.
 * ToDo: only when generating stats?
 */
223 allocated = calcAllocated();
  /* Figure out which generation to collect
   */
227 if (force_major_gc) {
228 N = RtsFlags.GcFlags.generations - 1;
232 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
237 major_gc = (N == RtsFlags.GcFlags.generations-1);
240 #ifdef RTS_GTK_FRONTPANEL
241 if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif
246 /* check stack sanity *before* GC (ToDo: check all threads) */
248 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
250 IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
254 static_objects = END_OF_STATIC_LIST;
255 scavenged_static_objects = END_OF_STATIC_LIST;
257 /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
261 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
266 if (RtsFlags.GcFlags.generations == 1) {
267 old_to_space = g0s0->to_space;
268 g0s0->to_space = NULL;
271 /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
279 for (g = 0; g <= N; g++) {
280 generations[g].mut_once_list = END_MUT_LIST;
281 generations[g].mut_list = END_MUT_LIST;
283 for (s = 0; s < generations[g].n_steps; s++) {
285 /* generation 0, step 0 doesn't need to-space */
286 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
294 stp = &generations[g].steps[s];
295 ASSERT(stp->gen->no == g);
296 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
297 bd->gen = &generations[g];
300 bd->evacuated = 1; /* it's a to-space block */
302 stp->hpLim = stp->hp + BLOCK_SIZE_W;
306 stp->scan = bd->start;
308 stp->new_large_objects = NULL;
309 stp->scavenged_large_objects = NULL;
311 /* mark the large objects as not evacuated yet */
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
318 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
321 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
322 for (s = 0; s < generations[g].n_steps; s++) {
323 stp = &generations[g].steps[s];
324 if (stp->hp_bd == NULL) {
326 bd->gen = &generations[g];
329 bd->evacuated = 0; /* *not* a to-space block */
331 stp->hpLim = stp->hp + BLOCK_SIZE_W;
337 /* Set the scan pointer for older generations: remember we
338 * still have to scavenge objects that have been promoted. */
340 stp->scan_bd = stp->hp_bd;
341 stp->to_space = NULL;
343 stp->new_large_objects = NULL;
344 stp->scavenged_large_objects = NULL;
348 /* -----------------------------------------------------------------------
349 * follow all the roots that we know about:
350 * - mutable lists from each generation > N
351 * we want to *scavenge* these roots, not evacuate them: they're not
352 * going to move in this GC.
353 * Also: do them in reverse generation order. This is because we
354 * often want to promote objects that are pointed to by older
355 * generations early, so we don't have to repeatedly copy them.
356 * Doing the generations in reverse order ensures that we don't end
357 * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
362 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
363 generations[g].saved_mut_list = generations[g].mut_list;
364 generations[g].mut_list = END_MUT_LIST;
367 /* Do the mut-once lists first */
368 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
369 IF_PAR_DEBUG(verbose,
370 printMutOnceList(&generations[g]));
371 scavenge_mut_once_list(&generations[g]);
373 for (st = generations[g].n_steps-1; st >= 0; st--) {
374 scavenge(&generations[g].steps[st]);
378 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
379 IF_PAR_DEBUG(verbose,
380 printMutableList(&generations[g]));
381 scavenge_mutable_list(&generations[g]);
383 for (st = generations[g].n_steps-1; st >= 0; st--) {
384 scavenge(&generations[g].steps[st]);
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */
405 /* Mark the entries in the GALA table of the parallel system */
406 markLocalGAs(major_gc);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
412 old_weak_ptr_list = weak_ptr_list;
413 weak_ptr_list = NULL;
414 weak_done = rtsFalse;
416 /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
419 old_all_threads = all_threads;
420 all_threads = END_TSO_QUEUE;
421 resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
425 markStablePtrTable(major_gc);
429 /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);
  markHugsObjects();
438 /* -------------------------------------------------------------------------
439 * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;
447 /* scavenge static objects */
448 if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
             checkStaticObjects());
    scavenge_static();
  }
454 /* When scavenging the older generations: Objects may have been
455 * evacuated from generations <= N into older generations, and we
456 * need to scavenge these objects. We're going to try to ensure that
457 * any evacuations that occur move the objects into at least the
458 * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */
463 /* scavenge each step in generations 0..maxgen */
467 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
468 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
469 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
472 stp = &generations[gen].steps[st];
      if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
        scavenge(stp);
        flag = rtsTrue;
      }
      if (stp->new_large_objects != NULL) {
        scavenge_large(stp);
        flag = rtsTrue;
      }
487 if (flag) { goto loop; }
489 /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
495 /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
498 cleanup_weak_ptr_list(&weak_ptr_list);
  /* Now see which stable names are still alive.
   */
502 gcStablePtrTable(major_gc);
505 /* Reconstruct the Global Address tables used in GUM */
506 rebuildGAtables(major_gc);
507 IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
508 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
511 /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
519 if (RtsFlags.GcFlags.generations > 1) {
521 oldest_gen->max_blocks =
522 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
523 RtsFlags.GcFlags.minOldGenSize);
524 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
525 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
526 if (((int)oldest_gen->max_blocks -
527 (int)oldest_gen->steps[0].to_blocks) <
528 (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
        heapOverflow();
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
538 copied = new_blocks * BLOCK_SIZE_W;
539 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
542 generations[g].collections++; /* for stats */
545 for (s = 0; s < generations[g].n_steps; s++) {
547 stp = &generations[g].steps[s];
549 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
550 /* Tidy the end of the to-space chains */
551 stp->hp_bd->free = stp->hp;
552 stp->hp_bd->link = NULL;
553 /* stats information: how much we copied */
        copied -= stp->hp_bd->start + BLOCK_SIZE_W -
          stp->hp_bd->free;
560 /* for generations we collected... */
563 collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */
565 /* free old memory and shift to-space into from-space for all
566 * the collected steps (except the allocation area). These
         * freed blocks will probably be quickly recycled.
         */
569 if (!(g == 0 && s == 0)) {
570 freeChain(stp->blocks);
571 stp->blocks = stp->to_space;
572 stp->n_blocks = stp->to_blocks;
573 stp->to_space = NULL;
575 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
576 bd->evacuated = 0; /* now from-space */
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large_objects, having been moved during garbage
         * collection from large_objects.  Any objects left on the
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
593 stp->large_objects = stp->scavenged_large_objects;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =     oldgen_max_blocks * G
         *               ----------------------------
         *               (number of generations - 1)
         *
         * where G is the generation number, matching the division by
         * (RtsFlags.GcFlags.generations-1) below.
         */
605 generations[g].max_blocks = (oldest_gen->max_blocks * g)
606 / (RtsFlags.GcFlags.generations-1);
608 generations[g].max_blocks = oldest_gen->max_blocks;
611 /* for older generations... */
614 /* For older generations, we need to append the
615 * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
618 for (bd = stp->scavenged_large_objects; bd; bd = next) {
621 dbl_link_onto(bd, &stp->large_objects);
624 /* add the new blocks we promoted during this GC */
625 stp->n_blocks += stp->to_blocks;
  /* Guess the amount of live data for stats. */
  live = calcLive();
633 /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
636 if (small_alloc_list != NULL) {
637 freeChain(small_alloc_list);
639 small_alloc_list = NULL;
643 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
645 /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
648 if (RtsFlags.GcFlags.generations == 1) {
651 if (old_to_space != NULL) {
652 freeChain(old_to_space);
654 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
655 bd->evacuated = 0; /* now from-space */
658 /* For a two-space collector, we need to resize the nursery. */
660 /* set up a new nursery. Allocate a nursery size based on a
661 * function of the amount of live data (currently a factor of 2,
662 * should be configurable (ToDo)). Use the blocks from the old
663 * nursery if possible, freeing up any left over blocks.
665 * If we get near the maximum heap size, then adjust our nursery
666 * size accordingly. If the nursery is the same size as the live
667 * data (L), then we need 3L bytes. We can reduce the size of the
668 * nursery to bring the required memory down near 2L bytes.
670 * A normal 2-space collector would need 4L bytes to give the same
671 * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
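
    /* Illustrative arithmetic for the claim above (not in the original
     * source): with live data L and a nursery of L blocks, a collection
     * needs the L live blocks, the L nursery blocks, and up to L blocks
     * of to-space for the copy: 3L in total.  A conventional two-space
     * collector with semispaces of 2L each would reserve 4L for the
     * same allocation headroom.
     */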
674 blocks = g0s0->to_blocks;
676 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
677 RtsFlags.GcFlags.maxHeapSize ) {
678 int adjusted_blocks; /* signed on purpose */
681 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
682 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
683 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
687 blocks = adjusted_blocks;
690 blocks *= RtsFlags.GcFlags.oldGenFactor;
691 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
692 blocks = RtsFlags.GcFlags.minAllocAreaSize;
695 resizeNursery(blocks);
698 /* Generational collector:
699 * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
703 if (RtsFlags.GcFlags.heapSizeSuggestion) {
705 nat needed = calcNeeded(); /* approx blocks needed at next GC */
707 /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
712 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
715 /* Estimate a size for the allocation area based on the
716 * information available. We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close.
     *
720 * Formula: suggested - needed
721 * ----------------------------
722 * 1 + g0s0_pcnt_kept/100
724 * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
729 (100 + (int)g0s0_pcnt_kept);
731 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
732 blocks = RtsFlags.GcFlags.minAllocAreaSize;
735 resizeNursery((nat)blocks);
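
    /* Worked example for the formula above (illustrative numbers only):
     * with heapSizeSuggestion = 1000 blocks, needed = 200 and
     * g0s0_pcnt_kept = 30, we get blocks = (1000-200)*100/130 ~= 615;
     * a 615-block nursery plus the ~185 blocks (30%) that survive it
     * plus the 200 blocks needed elsewhere comes to roughly the
     * suggested 1000 blocks.
     */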
739 /* mark the garbage collected CAFs as dead */
740 #if 0 /* doesn't work at the moment */
  if (major_gc) { gcCAFs(); }
#endif
746 /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
755 /* start any pending finalizers */
756 scheduleFinalizers(old_weak_ptr_list);
758 /* send exceptions to any threads which were about to die */
759 resurrectThreads(resurrected_threads);
761 /* check sanity after GC */
762 IF_DEBUG(sanity, checkSanity(N));
764 /* extra GC trace info */
765 IF_DEBUG(gc, stat_describe_gens());
768 /* symbol-table based profiling */
769 /* heapCensus(to_space); */ /* ToDo */
772 /* restore enclosing cost centre */
778 /* check for memory leaks if sanity checking is on */
779 IF_DEBUG(sanity, memInventory());
781 #ifdef RTS_GTK_FRONTPANEL
782 if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif
787 /* ok, GC over: tell the stats department what happened. */
788 stat_endGC(allocated, collected, live, copied, N);
791 //@node Weak Pointers, Evacuation, Garbage Collect
792 //@subsection Weak Pointers
794 /* -----------------------------------------------------------------------------
797 traverse_weak_ptr_list is called possibly many times during garbage
798 collection. It returns a flag indicating whether it did any work
799 (i.e. called evacuate on any live pointers).
801 Invariant: traverse_weak_ptr_list is called when the heap is in an
802 idempotent state. That means that there are no pending
803 evacuate/scavenge operations. This invariant helps the weak
804 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
808 For generational GC: we just don't try to finalize weak pointers in
809 older generations than the one we're collecting. This could
810 probably be optimised by keeping per-generation lists of weak
811 pointers, but for a few weak pointers this scheme will work.
812 -------------------------------------------------------------------------- */
813 //@cindex traverse_weak_ptr_list
static rtsBool
traverse_weak_ptr_list(void)
818 StgWeak *w, **last_w, *next_w;
820 rtsBool flag = rtsFalse;
822 if (weak_done) { return rtsFalse; }
824 /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;
829 last_w = &old_weak_ptr_list;
830 for (w = old_weak_ptr_list; w; w = next_w) {
832 /* First, this weak pointer might have been evacuated. If so,
833 * remove the forwarding pointer from the weak_ptr_list.
835 if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }
840 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
841 * called on a live weak pointer object. Just remove it.
843 if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }
849 ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
853 if ((new = isAlive(w->key))) {
855 /* evacuate the value and finalizer */
856 w->value = evacuate(w->value);
857 w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
860 /* and put it on the new weak ptr list */
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
865 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
875 /* Now deal with the all_threads list, which behaves somewhat like
876 * the weak ptr list. If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
880 StgTSO *t, *tmp, *next, **prev;
882 prev = &old_all_threads;
883 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
      /* Threads which have finished or died get dropped from
       * the list.
       */
888 switch (t->what_next) {
889 case ThreadRelocated:
895 next = t->global_link;
901 /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
904 (StgClosure *)tmp = isAlive((StgClosure *)t);
906 next = tmp->global_link;
907 tmp->global_link = all_threads;
911 prev = &(t->global_link);
912 next = t->global_link;
917 /* If we didn't make any changes, then we can go round and kill all
918 * the dead weak pointers. The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
921 if (flag == rtsFalse) {
922 cleanup_weak_ptr_list(&old_weak_ptr_list);
923 for (w = old_weak_ptr_list; w; w = w->link) {
924 w->finalizer = evacuate(w->finalizer);
    /* And resurrect any threads which were about to become garbage.
     */
930 StgTSO *t, *tmp, *next;
931 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
932 next = t->global_link;
933 (StgClosure *)tmp = evacuate((StgClosure *)t);
934 tmp->global_link = resurrected_threads;
935 resurrected_threads = tmp;
945 /* -----------------------------------------------------------------------------
946 After GC, the live weak pointer list may have forwarding pointers
947 on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.
951 Also, we don't consider weak pointer objects to be reachable, but
952 we must nevertheless consider them to be "live" and retain them.
953 Therefore any weak pointer objects which haven't as yet been
954 evacuated need to be evacuated now.
955 -------------------------------------------------------------------------- */
957 //@cindex cleanup_weak_ptr_list
static void
cleanup_weak_ptr_list ( StgWeak **list )
962 StgWeak *w, **last_w;
  last_w = list;
  for (w = *list; w; w = w->link) {
967 if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }
972 if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
980 /* -----------------------------------------------------------------------------
981 isAlive determines whether the given closure is still alive (after
982 a garbage collection) or not. It returns the new address of the
983 closure if it is alive, or NULL otherwise.
984 -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
991 const StgInfoTable *info;
998 /* ToDo: for static closures, check the static link field.
999 * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */
1003 /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }
  info = get_itbl(p);

  switch (info->type) {
1013 case IND_OLDGEN: /* rely on compatible layout with StgInd */
1014 case IND_OLDGEN_PERM:
1015 /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    return isAlive(p);
1021 return ((StgEvacuated *)p)->evacuee;
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;
1028 case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;
1033 if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      return isAlive(p);
    }
    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
        && Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;
StgClosure *
MarkRoot(StgClosure *root)
1057 # if 0 && defined(PAR) && defined(DEBUG)
1058 StgClosure *foo = evacuate(root);
1059 // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo)); // must be in to-space
# endif
1063 return evacuate(root);
1068 static void addBlock(step *stp)
  bdescr *bd = allocBlock();
  bd->gen = stp->gen;
  bd->step = stp;
  if (stp->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }
1080 stp->hp_bd->free = stp->hp;
1081 stp->hp_bd->link = bd;
1082 stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->to_blocks++;
  new_blocks++;
}
1089 //@cindex upd_evacuee
1091 static __inline__ void
1092 upd_evacuee(StgClosure *p, StgClosure *dest)
1094 p->header.info = &stg_EVACUATED_info;
1095 ((StgEvacuated *)p)->evacuee = dest;
1100 static __inline__ StgClosure *
1101 copy(StgClosure *src, nat size, step *stp)
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
1106 /* Find out where we're going, using the handy "to" pointer in
1107 * the step of the source object. If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * below).
   */
1111 if (stp->gen->no < evac_gen) {
1112 #ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }
  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
1132 upd_evacuee(src,(StgClosure *)dest);
1133 return (StgClosure *)dest;
1136 /* Special version of copy() for when we only want to copy the info
1137 * pointer of an object, but reserve some padding after it. This is
 * used to optimise evacuation of BLACKHOLEs.
 */
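
/* Example use (as in evacuate() below): a BLACKHOLE is evacuated by
 * copying only its header and reserving the remainder of its size as
 * uninitialised padding.
 */
#if 0
to = copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
#endif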
1143 static __inline__ StgClosure *
1144 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
1149 if (stp->gen->no < evac_gen) {
1150 #ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }
  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }
  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
1166 stp->hp += size_to_reserve;
1167 upd_evacuee(src,(StgClosure *)dest);
1168 return (StgClosure *)dest;
1171 //@node Evacuation, Scavenging, Weak Pointers
1172 //@subsection Evacuation
1174 /* -----------------------------------------------------------------------------
1175 Evacuate a large object
   This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.
1181 Convention: bd->evacuated is /= 0 for a large object that has been
1182 evacuated, or 0 otherwise.
1183 -------------------------------------------------------------------------- */
1185 //@cindex evacuate_large
static void
evacuate_large(StgPtr p, rtsBool mutable)
  bdescr *bd = Bdescr(p);
  step *stp;
1193 /* should point to the beginning of the block */
1194 ASSERT(((W_)p & BLOCK_MASK) == 0);
1196 /* already evacuated? */
1197 if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }
  stp = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
1212 } else { /* first object in the list */
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }
  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
1222 if (stp->gen->no < evac_gen) {
1223 #ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }
  bd->step = stp;
  bd->gen = stp->gen;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->evacuated = 1;
  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
1241 /* -----------------------------------------------------------------------------
1242 Adding a MUT_CONS to an older generation.
1244 This is necessary from time to time when we end up with an
1245 old-to-new generation pointer in a non-mutable object. We defer
1246 the promotion until the next GC.
1247 -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
1257 stp = &gen->steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }
1266 q = (StgMutVar *)stp->hp;
1267 stp->hp += sizeofW(StgMutVar);
1269 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1271 recordOldToNewPtrs((StgMutClosure *)q);
1273 return (StgClosure *)q;
1276 /* -----------------------------------------------------------------------------
1279 This is called (eventually) for every live object in the system.
1281 The caller to evacuate specifies a desired generation in the
1282 evac_gen global variable. The following conditions apply to
1283 evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.
1299 -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
1308 const StgInfoTable *info;
loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
1313 if (bd->gen->no > N) {
1314 /* Can't evacuate this object, because it's in a generation
1315 * older than the ones we're collecting. Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
1318 if (bd->gen->no < evac_gen) {
1320 failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif
1331 /* make sure the info pointer is into text space */
1332 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1333 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  if (info->type==RBH) {
1337 info = REVERT_INFOPTR(info);
1339 belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
1340 q, info_type(q), info, info_type_by_ip(info)));
1344 switch (info -> type) {
1347 ASSERT(q->header.info != &stg_MUT_CONS_info);
1349 to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;
1355 StgWord w = (StgWord)q->payload[0];
1356 if (q->header.info == Czh_con_info &&
1357 /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */
1358 (StgChar)w <= MAX_CHARLIKE) {
1359 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1361 if (q->header.info == Izh_con_info &&
1362 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1363 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1365 /* else, fall through ... */
1371 return copy(q,sizeofW(StgHeader)+1,stp);
1373 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1378 #ifdef NO_PROMOTE_THUNKS
1379 if (bd->gen->no == 0 &&
1380 bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      stp = bd->step;
    }
#endif
1385 return copy(q,sizeofW(StgHeader)+2,stp);
1393 return copy(q,sizeofW(StgHeader)+2,stp);
1399 case IND_OLDGEN_PERM:
1404 return copy(q,sizeW_fromITBL(info),stp);
1407 case SE_CAF_BLACKHOLE:
1410 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1413 to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
1417 case THUNK_SELECTOR:
1419 const StgInfoTable* selectee_info;
1420 StgClosure* selectee = ((StgSelector*)q)->selectee;
1423 selectee_info = get_itbl(selectee);
1424 switch (selectee_info->type) {
1433 StgWord32 offset = info->layout.selector_offset;
1435 /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));
1440 /* perform the selection! */
1441 q = selectee->payload[offset];
1443 /* if we're already in to-space, there's no need to continue
1444 * with the evacuation, just update the source address with
1445 * a pointer to the (evacuated) constructor field.
1447 if (HEAP_ALLOCED(q)) {
1448 bdescr *bd = Bdescr((P_)q);
1449 if (bd->evacuated) {
1450 if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }
1458 /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
1468 case IND_OLDGEN_PERM:
1469 selectee = ((StgInd *)selectee)->indirectee;
1473 selectee = ((StgEvacuated *)selectee)->evacuee;
1484 case THUNK_SELECTOR:
1485 /* aargh - do recursively???? */
1487 case SE_CAF_BLACKHOLE:
1491 /* not evaluated yet */
1495 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1496 (int)(selectee_info->type));
1499 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1503 /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;
1508 if (info->srt_len > 0 && major_gc &&
1509 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1510 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
1516 if (info->srt_len > 0 && major_gc &&
1517 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1518 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
1524 /* a revertible CAF - it'll be on the CAF list, so don't do
     * anything with it here (we'll scavenge it later).
     */
    if (((StgIndStatic *)q)->saved_info != NULL) {
      return q;
    }
1530 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1531 IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
1537 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1538 STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
1543 case CONSTR_INTLIKE:
1544 case CONSTR_CHARLIKE:
1545 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
1561 /* shouldn't see these */
1562 barf("evacuate: stack frame at %p\n", q);
1566 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1567 * of stack, tagging and all.
1569 * They can be larger than a block in size. Both are only
1570 * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
1574 nat size = pap_sizeW((StgPAP*)q);
1575 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        return copy(q,size,stp);
      }
1584 /* Already evacuated, just return the forwarding address.
1585 * HOWEVER: if the requested destination generation (evac_gen) is
1586 * older than the actual generation (because the object was
1587 * already evacuated to a younger generation) then we have to
1588 * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
1591 if (evac_gen > 0) { /* optimisation */
1592 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1593 if (Bdescr((P_)p)->gen->no < evac_gen) {
1594 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1595 failed_to_evac = rtsTrue;
1596 TICK_GC_FAILED_PROMOTION();
1599 return ((StgEvacuated*)q)->evacuee;
1603 nat size = arr_words_sizeW((StgArrWords *)q);
1605 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,stp);
      }
1615 case MUT_ARR_PTRS_FROZEN:
1617 nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
1619 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
1623 /* just copy the block */
1624 to = copy(q,size,stp);
1625 if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
1634 StgTSO *tso = (StgTSO *)q;
1635 nat size = tso_sizeW(tso);
      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
1640 if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }
      /* Large TSOs don't get moved, so no relocation is required.
       */
1647 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
1655 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1657 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1659 /* relocate the stack pointers... */
1660 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1661 new_tso->sp = (StgPtr)new_tso->sp + diff;
1663 relocate_TSO(tso, new_tso);
1665 recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
1671 case RBH: // cf. BLACKHOLE_BQ
1673 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1674 to = copy(q,BLACKHOLE_sizeW(),stp);
1675 //ToDo: derive size etc from reverted IP
1676 //to = copy(q,size,stp);
1677 recordMutable((StgMutClosure *)to);
1679 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1680 q, info_type(q), to, info_type(to)));
1685 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1686 to = copy(q,sizeofW(StgBlockedFetch),stp);
1688 belch("@@ evacuate: %p (%s) to %p (%s)",
1689 q, info_type(q), to, info_type(to)));
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1694 to = copy(q,sizeofW(StgFetchMe),stp);
1696 belch("@@ evacuate: %p (%s) to %p (%s)",
1697 q, info_type(q), to, info_type(to)));
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1702 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1704 belch("@@ evacuate: %p (%s) to %p (%s)",
1705 q, info_type(q), to, info_type(to)));
1710 barf("evacuate: strange closure type %d", (int)(info->type));
1716 /* -----------------------------------------------------------------------------
1717 relocate_TSO is called just after a TSO has been copied from src to
1718 dest. It adjusts the update frame list for the new location.
1719 -------------------------------------------------------------------------- */
1720 //@cindex relocate_TSO
1723 relocate_TSO(StgTSO *src, StgTSO *dest)
  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;
1734 while ((P_)su < dest->stack + dest->stack_size) {
1735 switch (get_itbl(su)->type) {
1737 /* GCC actually manages to common up these three cases! */
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;
1745 cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;
1751 sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;
1761 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1769 //@node Scavenging, Reverting CAFs, Evacuation
1770 //@subsection Scavenging
1772 //@cindex scavenge_srt
static void
scavenge_srt(const StgInfoTable *info)
1777 StgClosure **srt, **srt_end;
1779 /* evacuate the SRT. If srt_len is zero, then there isn't an
1780 * srt field in the info table. That's ok, because we'll
   * never dereference it.
   */
1783 srt = (StgClosure **)(info->srt);
1784 srt_end = srt + info->srt_len;
1785 for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator knows
1788 which is which when generating the SRT, so it stores the (indirect)
1789 reference to the DLL closure in the table by first adding one to it.
1790 We check for this here, and undo the addition before evacuating it.
1792 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
1795 #ifdef ENABLE_WIN32_DLL_SUPPORT
1796 if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
1807 /* -----------------------------------------------------------------------------
1809 -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
1814 /* chase the link field for any TSOs on the same queue */
1815 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1816 if ( tso->why_blocked == BlockedOnMVar
1817 || tso->why_blocked == BlockedOnBlackHole
       || tso->why_blocked == BlockedOnException
#if defined(PAR)
       || tso->why_blocked == BlockedOnGA
       || tso->why_blocked == BlockedOnGA_NoSend
#endif
       ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
1826 if ( tso->blocked_exceptions != NULL ) {
1827 tso->blocked_exceptions =
1828 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1830 /* scavenge this thread's stack */
1831 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1834 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.
1838 evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.
1842 We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
1845 -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q, next, end;
  bdescr *bd;
  const StgInfoTable *info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = stp->scan;
  bd = stp->scan_bd;
1859 failed_to_evac = rtsFalse;
  /* scavenge phase - standard breadth-first scavenging of the
   * to-space in this step.
   */
1865 while (bd != stp->hp_bd || p < stp->hp) {
1867 /* If we're at the end of this block, move on to the next block */
1868 if (bd != stp->hp_bd && p == bd->free) {
1874 q = p; /* save ptr to object */
1876 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1877 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1879 info = get_itbl((StgClosure *)p);
1881 if (info->type==RBH)
1882 info = REVERT_INFOPTR(info);
1885 switch (info -> type) {
1888 /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
1892 StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1895 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1896 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1897 p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }
1906 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1907 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1908 p += sizeofW(StgHeader) + 2;
1913 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1914 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1920 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1921 p += sizeofW(StgHeader) + 1;
1926 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1932 p += sizeofW(StgHeader) + 1;
1939 p += sizeofW(StgHeader) + 2;
1946 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1947 p += sizeofW(StgHeader) + 2;
1963 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1964 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1965 (StgClosure *)*p = evacuate((StgClosure *)*p);
1967 p += info->layout.payload.nptrs;
1972 if (stp->gen->no != 0) {
1973 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
1976 case IND_OLDGEN_PERM:
1977 ((StgIndOldGen *)p)->indirectee =
1978 evacuate(((StgIndOldGen *)p)->indirectee);
1979 if (failed_to_evac) {
1980 failed_to_evac = rtsFalse;
1981 recordOldToNewPtrs((StgMutClosure *)p);
1983 p += sizeofW(StgIndOldGen);
1987 /* ignore MUT_CONSs */
1988 if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
1993 p += sizeofW(StgMutVar);
1997 case SE_CAF_BLACKHOLE:
2000 p += BLACKHOLE_sizeW();
2005 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2006 (StgClosure *)bh->blocking_queue =
2007 evacuate((StgClosure *)bh->blocking_queue);
2008 if (failed_to_evac) {
2009 failed_to_evac = rtsFalse;
2010 recordMutable((StgMutClosure *)bh);
2012 p += BLACKHOLE_sizeW();
2016 case THUNK_SELECTOR:
2018 StgSelector *s = (StgSelector *)p;
2019 s->selectee = evacuate(s->selectee);
2020 p += THUNK_SELECTOR_sizeW();
2026 barf("scavenge:IND???\n");
2028 case CONSTR_INTLIKE:
2029 case CONSTR_CHARLIKE:
2031 case CONSTR_NOCAF_STATIC:
2035 /* Shouldn't see a static object here. */
2036 barf("scavenge: STATIC object\n");
2048 /* Shouldn't see stack frames here. */
2049 barf("scavenge: stack frame\n");
2051 case AP_UPD: /* same as PAPs */
2053 /* Treat a PAP just like a section of stack, not forgetting to
         * evacuate the function pointer too...
         */
2057 StgPAP* pap = (StgPAP *)p;
2059 pap->fun = evacuate(pap->fun);
2060 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2061 p += pap_sizeW(pap);
2066 /* nothing to follow */
2067 p += arr_words_sizeW((StgArrWords *)p);
2071 /* follow everything */
2075 evac_gen = 0; /* repeatedly mutable */
2076 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2077 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2078 (StgClosure *)*p = evacuate((StgClosure *)*p);
2080 evac_gen = saved_evac_gen;
2084 case MUT_ARR_PTRS_FROZEN:
2085 /* follow everything */
2087 StgPtr start = p, next;
2089 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2090 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2091 (StgClosure *)*p = evacuate((StgClosure *)*p);
2093 if (failed_to_evac) {
2094 /* we can do this easier... */
2095 recordMutable((StgMutClosure *)start);
2096 failed_to_evac = rtsFalse;
2103 StgTSO *tso = (StgTSO *)p;
        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
2107 p += tso_sizeW(tso);
2112 case RBH: // cf. BLACKHOLE_BQ
2114 // nat size, ptrs, nonptrs, vhs;
2116 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2117 StgRBH *rbh = (StgRBH *)p;
2118 (StgClosure *)rbh->blocking_queue =
2119 evacuate((StgClosure *)rbh->blocking_queue);
2120 if (failed_to_evac) {
2121 failed_to_evac = rtsFalse;
2122 recordMutable((StgMutClosure *)rbh);
2125 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2126 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2127 // ToDo: use size of reverted closure here!
2128 p += BLACKHOLE_sizeW();
2134 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2135 /* follow the pointer to the node which is being demanded */
2136 (StgClosure *)bf->node =
2137 evacuate((StgClosure *)bf->node);
2138 /* follow the link to the rest of the blocking queue */
2139 (StgClosure *)bf->link =
2140 evacuate((StgClosure *)bf->link);
2141 if (failed_to_evac) {
2142 failed_to_evac = rtsFalse;
2143 recordMutable((StgMutClosure *)bf);
2146 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2147 bf, info_type((StgClosure *)bf),
2148 bf->node, info_type(bf->node)));
2149 p += sizeofW(StgBlockedFetch);
2155 belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
2156 p, info_type((StgClosure *)p)));
2157 p += sizeofW(StgFetchMe);
2158 break; // nothing to do in this case
2160 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2162 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2163 (StgClosure *)fmbq->blocking_queue =
2164 evacuate((StgClosure *)fmbq->blocking_queue);
2165 if (failed_to_evac) {
2166 failed_to_evac = rtsFalse;
2167 recordMutable((StgMutClosure *)fmbq);
2170 belch("@@ scavenge: %p (%s) exciting, isn't it",
2171 p, info_type((StgClosure *)p)));
2172 p += sizeofW(StgFetchMeBlockingQueue);
2178 barf("scavenge: unimplemented/strange closure type %d @ %p",
2182 barf("scavenge: unimplemented/strange closure type %d @ %p",
2186 /* If we didn't manage to promote all the objects pointed to by
2187 * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
2190 if (failed_to_evac) {
2191 mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
2200 /* -----------------------------------------------------------------------------
2201 Scavenge one object.
2203 This is used for objects that are temporarily marked as mutable
2204 because they contain old-to-new generation pointers. Only certain
2205 objects can have this property.
2206 -------------------------------------------------------------------------- */
2207 //@cindex scavenge_one
static rtsBool
scavenge_one(StgClosure *p)
  const StgInfoTable *info;
  rtsBool no_luck;
2215 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2216 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2221 if (info->type==RBH)
2222 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2225 switch (info -> type) {
2228 case FUN_1_0: /* hardly worth specialising these guys */
2248 case IND_OLDGEN_PERM:
2252 end = (P_)p->payload + info->layout.payload.ptrs;
2253 for (q = (P_)p->payload; q < end; q++) {
2254 (StgClosure *)*q = evacuate((StgClosure *)*q);
2260 case SE_CAF_BLACKHOLE:
2265 case THUNK_SELECTOR:
2267 StgSelector *s = (StgSelector *)p;
2268 s->selectee = evacuate(s->selectee);
2272 case AP_UPD: /* same as PAPs */
2274 /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
2278 StgPAP* pap = (StgPAP *)p;
2280 pap->fun = evacuate(pap->fun);
2281 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2286 /* This might happen if for instance a MUT_CONS was pointing to a
2287 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * special.
     */
    break;
2294 barf("scavenge_one: strange object %d", (int)(info->type));
2297 no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
2303 /* -----------------------------------------------------------------------------
2304 Scavenging mutable lists.
2306 We treat the mutable list of each generation > N (i.e. all the
2307 generations older than the one being collected) as roots. We also
2308 remove non-mutable objects from the mutable list at this point.
2309 -------------------------------------------------------------------------- */
2310 //@cindex scavenge_mut_once_list
static void
scavenge_mut_once_list(generation *gen)
2315 const StgInfoTable *info;
2316 StgMutClosure *p, *next, *new_list;
2318 p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;
  evac_gen = gen->no;
  failed_to_evac = rtsFalse;
2325 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2327 /* make sure the info pointer is into text space */
2328 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2329 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2333 if (info->type==RBH)
2334 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2336 switch(info->type) {
2339 case IND_OLDGEN_PERM:
2341 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
2344 ((StgIndOldGen *)p)->indirectee =
2345 evacuate(((StgIndOldGen *)p)->indirectee);
2348 if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
2353 StgPtr start = gen->steps[0].scan;
2354 bdescr *start_bd = gen->steps[0].scan_bd;
2356 scavenge(&gen->steps[0]);
2357 if (start_bd != gen->steps[0].scan_bd) {
2358 size += (P_)BLOCK_ROUND_UP(start) - start;
2359 start_bd = start_bd->link;
2360 while (start_bd != gen->steps[0].scan_bd) {
2361 size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
2364 size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
2369 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2373 /* failed_to_evac might happen if we've got more than two
2374 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
2378 if (failed_to_evac) {
2379 failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
2383 /* the mut_link field of an IND_STATIC is overloaded as the
2384 * static link field too (it just so happens that we don't need
2385 * both at the same time), so we need to NULL it out when
2386 * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2399 ASSERT(p->header.info == &stg_MUT_CONS_info);
2400 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2401 /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;
2410 /* shouldn't have anything else on the mutables list */
2411 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2415 gen->mut_once_list = new_list;
2418 //@cindex scavenge_mutable_list
static void
scavenge_mutable_list(generation *gen)
2423 const StgInfoTable *info;
2424 StgMutClosure *p, *next;
  p = gen->saved_mut_list;
  next = p->mut_link;
  evac_gen = 0;
  failed_to_evac = rtsFalse;
2432 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2434 /* make sure the info pointer is into text space */
2435 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2436 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2440 if (info->type==RBH)
2441 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2443 switch(info->type) {
2445 case MUT_ARR_PTRS_FROZEN:
2446 /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
2452 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2454 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2455 (StgClosure *)*q = evacuate((StgClosure *)*q);
2459 if (failed_to_evac) {
2460 failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
      }
      continue;
2468 /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
2474 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2475 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2476 (StgClosure *)*q = evacuate((StgClosure *)*q);
2482 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2486 ASSERT(p->header.info != &stg_MUT_CONS_info);
2487 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;
2494 StgMVar *mvar = (StgMVar *)p;
2495 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2496 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2497 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }
        StgTSO *tso = (StgTSO *)p;

        scavengeTSO(tso);
2509 /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
2513 tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }
2520 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2521 (StgClosure *)bh->blocking_queue =
2522 evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }
    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
     */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
2532 /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
2536 ((StgIndOldGen *)p)->indirectee =
2537 evacuate(((StgIndOldGen *)p)->indirectee);
2540 if (failed_to_evac) {
2541 failed_to_evac = rtsFalse;
2542 p->mut_link = gen->mut_once_list;
2543 gen->mut_once_list = p;
      // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // char str[80];
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;

        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif /* PAR */

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
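
/* For context: before this function runs, the caller has moved the
 * generation's mutable list aside and started a fresh one, so that the
 * cases above can re-record still-mutable objects by consing them back
 * onto gen->mut_list.  Roughly (a sketch of the caller's set-up, not a
 * verbatim quote of GarbageCollect()):
 */
#if 0
  gen->saved_mut_list = gen->mut_list;
  gen->mut_list = END_MUT_LIST;         /* fresh, empty mutable list     */
  scavenge_mutable_list(gen);           /* re-records what is still hot  */
#endif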
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
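
/* The two-list discipline above, in miniature.  A sketch (not compiled;
 * SObj, END and drain() are hypothetical stand-ins for StgClosure,
 * END_OF_STATIC_LIST and the loop above): because scav() can push newly
 * reachable objects onto `pending`, we re-read the list head every
 * iteration instead of caching a `next` pointer.
 */
#if 0
typedef struct SObj_ { struct SObj_ *slink; } SObj;
#define END ((SObj *)1)

static SObj *pending = END;             /* cf. static_objects            */
static SObj *done    = END;             /* cf. scavenged_static_objects  */

static void drain(void (*scav)(SObj *))
{
  SObj *p;
  while ((p = pending) != END) {
    pending  = p->slink;                /* pop from the pending stack    */
    p->slink = done;                    /* push onto the done stack      */
    done     = p;
    scav(p);                            /* may grow `pending` again      */
  }
}
#endif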
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;
    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
        ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
        StgPtr old_p = p;
        p++; p++;
        IF_DEBUG(sanity,
                 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                       old_p, p, old_p+1));
#else
        p++; /* what if FHS!=1 !? -- HWL */
#endif
        goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *stp;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          stp = bd->step;
          if (!(stp->gen->no == 0 &&
                stp->no != 0 &&
                stp->no == stp->gen->n_steps-1)) {
            stp = stp->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), stp);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgPtr q;
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
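
/* The small-bitmap convention used above, isolated: bit i of the mask
 * describes payload word i, and a *clear* bit means "pointer, follow it".
 * A sketch (not compiled; walk_frame() and follow() are hypothetical).
 * Note that the loop stops at the highest set bit; in scavenge_stack the
 * remaining words are picked up by the outer loop, which treats any
 * non-info word as a closure pointer.
 */
#if 0
#include <stdint.h>

static void walk_frame(uintptr_t *payload, uintptr_t bitmap,
                       void (*follow)(uintptr_t *))
{
  for (; bitmap != 0; bitmap >>= 1, payload++) {
    if ((bitmap & 1) == 0) {
      follow(payload);                  /* cf. evacuate() above */
    }
  }
}
#endif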
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                  /* most objects are mutable */
  bd = stp->new_large_objects;
  for (; bd != NULL; bd = stp->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    stp->new_large_objects = bd->link;
    dbl_link_onto(bd, &stp->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case AP_UPD:
    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;

        evac_gen = saved_evac_gen; /* not really mutable */
        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        evac_gen = 0;
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
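
/* dbl_link_onto(), used above to move blocks between the two large-object
 * lists, is defined earlier in this file.  Its effect, sketched on a
 * hypothetical block type (BD and `back` stand in for bdescr and its
 * back-pointer field):
 */
#if 0
typedef struct BD_ { struct BD_ *link, *back; } BD;

static void dbl_link_onto_sketch(BD *bd, BD **list)
{
  bd->link = *list;
  bd->back = NULL;                      /* bd becomes the new head       */
  if (*list != NULL) {
    (*list)->back = bd;                 /* keep the list doubly-linked   */
  }
  *list = bd;
}
#endif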
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs( void )
{
  StgIndStatic *c;

  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->header.info = c->saved_info;
    c->saved_info = NULL;
    /* could, but not necessary: c->static_link = NULL; */
  }
  caf_list = NULL;
}

void
scavengeCAFs( void )
{
  StgIndStatic *c;

  evac_gen = 0;
  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->indirectee = evacuate(c->indirectee);
  }
}
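
/* The revert trick above relies on each CAF carrying its original info
 * pointer in a spare field.  In miniature (a sketch; `caf` here is a
 * hypothetical two-field record, not StgIndStatic):
 */
#if 0
struct caf { const void *info; const void *saved_info; };

static void revert_one(struct caf *c)
{
  c->info = c->saved_info;      /* CAF behaves as unevaluated once more */
  c->saved_info = NULL;
}
#endif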
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at %p\n", p));
      /* black hole it */
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif /* DEBUG */
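
/* The unlink idiom used in gcCAFs(), reduced to its core (a sketch with a
 * hypothetical Node type): pp always addresses the link field through
 * which p was reached, so deleting p is one store through pp and no
 * "previous node" bookkeeping is needed.
 */
#if 0
typedef struct Node_ { struct Node_ *link; int dead; } Node;

static void sweep(Node **pp)
{
  Node *p;
  while ((p = *pp) != NULL) {
    if (p->dead) {
      *pp = p->link;                    /* splice p out of the list      */
    } else {
      pp = &p->link;                    /* advance to the next link slot */
    }
  }
}
#endif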
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
          bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at %p\n",bh);
#endif
        SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole: weird activation record found on stack");
    }
  }
}
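
/* The early return above is the heart of lazy black holing: update frames
 * are pushed on top of older ones, so if some updatee is already a
 * BLACKHOLE every frame below it was handled by an earlier pause.  In
 * miniature (a sketch; Frame and blackhole_all() are hypothetical):
 */
#if 0
typedef struct Frame_ { struct Frame_ *link; int *updatee; } Frame;
#define BLACK 1

static void blackhole_all(Frame *f)
{
  for (; f != NULL; f = f->link) {
    if (*f->updatee == BLACK) {
      return;                           /* the rest are already done */
    }
    *f->updatee = BLACK;
  }
}
#endif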
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;           /* Temporally next */
  StgUpdateFrame *prev_frame;           /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#ifdef DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#ifdef DEBUG
    IF_DEBUG(sanity,
             if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
               printObj((StgClosure *)prev_frame);
               barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
                    frame, prev_frame);
             })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &stg_BLACKHOLE_info)
        bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
           frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &stg_BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#ifdef DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;          /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;       /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &stg_BLACKHOLE_info &&
            bh->header.info != &stg_BLACKHOLE_BQ_info &&
            bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at %p\n",bh);
#endif
#ifdef DEBUG
          /* zero out the slop so that the sanity checker can tell
           * where the next closure is.
           */
          {
            StgInfoTable *info = get_itbl(bh);
            nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
            /* don't zero out slop for a THUNK_SELECTOR, because its layout
             * info is used for a different purpose, and it's exactly the
             * same size as a BLACKHOLE in any case.
             */
            if (info->type != THUNK_SELECTOR) {
              for (i = np; i < np + nw; i++) {
                ((StgClosure *)bh)->payload[i] = 0;
              }
            }
          }
#endif
          SET_INFO(bh,&stg_BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
               fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                       displacement))

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
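
/* The link-reversal walk used above, reduced to its core (a sketch with a
 * hypothetical Frame type): on the way down each link is turned to point
 * at the *younger* frame, so the squeeze loop can revisit frames oldest-
 * first without an auxiliary stack, restoring the links as it goes.
 */
#if 0
typedef struct Frame_ { struct Frame_ *link; } Frame;

static Frame *reverse_down(Frame *frame)
{
  Frame *next = NULL, *prev;

  while (frame != NULL) {
    prev = frame->link;                 /* temporally previous frame */
    frame->link = next;                 /* reverse the link          */
    next = frame;
    frame = prev;
  }
  return next;              /* oldest frame; links now point young-wards */
}
#endif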
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
          info->type == MUT_ARR_PTRS_FROZEN ||
          info->type == TSO ||
          info->type == ARR_WORDS);
}

#endif /* DEBUG */
//@node Index, , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index