/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.78 2000/04/11 16:36:53 sewardj Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
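
/* Illustrative sketch (editor's addition, not original code): a static
 * object is pushed onto the list via its static link field, with the
 * non-NULL sentinel END_OF_STATIC_LIST marking the end:
 *
 *     if (STATIC_LINK(info,p) == NULL) {        // not on the list yet
 *         STATIC_LINK(info,p) = static_objects;
 *         static_objects = p;
 *     }
 *
 * The NULL test is what breaks loops: an object already on the list has
 * a non-NULL link field and is not pushed a second time.
 */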
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	  /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */

//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate		    ( StgClosure *q );
static void	    zero_static_object_list ( StgClosure* first_static );
static void	    zero_mutable_list	    ( StgMutClosure *first );
static void	    revert_dead_CAFs	    ( void );
static rtsBool	    traverse_weak_ptr_list  ( void );
static void	    cleanup_weak_ptr_list   ( StgWeak **list );
static void	    scavenge_stack	    ( StgPtr p, StgPtr stack_end );
static void	    scavenge_large	    ( step *step );
static void	    scavenge		    ( step *step );
static void	    scavenge_static	    ( void );
static void	    scavenge_mutable_list   ( generation *g );
static void	    scavenge_mut_once_list  ( generation *g );
static void	    gcCAFs		    ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
	 generation if one exists, or else to-space in the current
	 generation.
       + if the object's generation < N, then evacuate it to to-space
	 in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
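
/* Usage sketch (editor's addition): callers hand GarbageCollect() a
 * callback that marks their roots via MarkRoot().  With a hypothetical
 * root "my_global_root", this looks roughly like:
 *
 *     static StgClosure *my_global_root;
 *
 *     static void my_get_roots( void )
 *     {
 *         my_global_root = MarkRoot(my_global_root);
 *     }
 *
 *     GarbageCollect(my_get_roots, rtsFalse);  // collector chooses N
 *     GarbageCollect(my_get_roots, rtsTrue);   // force a major GC
 */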
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
  lnat live, allocated, collected = 0, copied = 0;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;

  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {

  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd->gen = &generations[g];
        bd->evacuated = 0;	/* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;

      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);

  /* follow all the roots that the application knows about.
   */

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell!
   */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   */
      extern void markHugsObjects(void);

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
                    checkStaticObjects());

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
        step = &generations[gen].steps[st];
        if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
        if (step->new_large_objects != NULL) {
          scavenge_large(step);

    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */

  /* Final traversal of the weak pointer list (see comment by
   * cleanUpWeakPtrList below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
              RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
           (int)oldest_gen->steps[0].to_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    generations[g].collections++; /* for stats */

    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        copied -= step->hp_bd->start + BLOCK_SIZE_W -

      /* for generations we collected... */
      collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

      /* free old memory and shift to-space into from-space for all
       * the collected steps (except the allocation area).  These
       * freed blocks will probably be quickly recycled.
       */
      if (!(g == 0 && s == 0)) {
        freeChain(step->blocks);
        step->blocks = step->to_space;
        step->n_blocks = step->to_blocks;
        step->to_space = NULL;
        for (bd = step->blocks; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;	/* now from-space */

      /* LARGE OBJECTS.  The current live large objects are chained on
       * scavenged_large, having been moved during garbage
       * collection from large_objects.  Any objects left on the
       * large_objects list are therefore dead, so we free them here.
       */
      for (bd = step->large_objects; bd != NULL; bd = next) {

      for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {

      step->large_objects = step->scavenged_large_objects;
      /* Set the maximum blocks for this generation, interpolating
       * between the maximum size of the oldest and youngest
       * generations.
       *
       *   max_blocks =   oldgen_max_blocks * G
       *                 -----------------------
       *                    n_generations - 1
       */
      generations[g].max_blocks = (oldest_gen->max_blocks * g)
                                   / (RtsFlags.GcFlags.generations-1);

      generations[g].max_blocks = oldest_gen->max_blocks;
      /* for older generations... */

      /* For older generations, we need to append the
       * scavenged_large_object list (i.e. large objects that have been
       * promoted during this GC) to the large_object list for that step.
       */
      for (bd = step->scavenged_large_objects; bd; bd = next) {
        dbl_link_onto(bd, &step->large_objects);

      /* add the new blocks we promoted during this GC */
      step->n_blocks += step->to_blocks;

  /* Guess the amount of live data for stats. */

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);

  small_alloc_list = NULL;

  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {

    if (old_to_space != NULL) {
      freeChain(old_to_space);

    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
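
    /* Worked example (editor's note): with live data L and a nursery also
     * of size L, a collection transiently needs nursery + live data +
     * to-space copy = L + L + L = 3L bytes.  Shrinking the nursery
     * towards zero brings the peak towards live + copy = 2L bytes, which
     * is what the adjustment below aims for near the maximum heap size.
     */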
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr,
                           "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n",
                           RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        blocks = adjusted_blocks;

    blocks *= RtsFlags.GcFlags.oldGenFactor;
    if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;

    resizeNursery(blocks);
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {

    nat needed = calcNeeded();	/* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;

    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);

    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;

    resizeNursery((nat)blocks);
  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  zero_static_object_list(scavenged_static_objects);

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   garbage.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
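
/* Outline sketch (editor's addition) of the per-weak-pointer rule that
 * the loop below implements:
 *
 *     if ((new = isAlive(w->key))) {
 *         // key reachable: keep the weak pointer and keep its
 *         // value and finalizer alive
 *         w->value     = evacuate(w->value);
 *         w->finalizer = evacuate(w->finalizer);
 *     } else {
 *         // leave w on old_weak_ptr_list; if a whole pass over the
 *         // list evacuates nothing new, everything still on it is
 *         // dead and its finalizers are scheduled
 *     }
 */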
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
  StgWeak *w, **last_w, *next_w;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {

      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n",
                             w, w->key));
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
        next = t->global_link;

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
        next = tmp->global_link;
        tmp->global_link = all_threads;

        prev = &(t->global_link);
        next = t->global_link;

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);

    /* And resurrect any threads which were about to become garbage.
     */
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
  StgWeak *w, **last_w;

  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
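
/* Usage sketch (editor's addition): callers must switch to the returned
 * address, since the closure may have moved:
 *
 *     StgClosure *new;
 *     if ((new = isAlive(p)) != NULL) {
 *         p = new;           // still live, possibly relocated
 *     } else {
 *         ...                // p is garbage
 *     }
 */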
StgClosure *
isAlive(StgClosure *p)
  const StgInfoTable *info;

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {

  switch (info->type) {

  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;

    return ((StgEvacuated *)p)->evacuee;

    size = bco_sizeW((StgBCO*)p);

    size = arr_words_sizeW((StgArrWords *)p);

  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);

    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;

    size = tso_sizeW((StgTSO *)p);

  if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
      && Bdescr((P_)p)->evacuated)

StgClosure *
MarkRoot(StgClosure *root)
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space

  return evacuate(root);

static void addBlock(step *step)
  bdescr *bd = allocBlock();
  bd->gen = step->gen;

  if (step->gen->no <= N) {

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * below).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {

  for (to = step->hp, from = (P_)src; size > 0; --size) {

  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {

  for (to = step->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {

  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
  bdescr *bd = Bdescr(p);

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();

  /* remove from large_object list */
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;

    bd->link->back = bd->back;

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;

  recordMutable((StgMutClosure *)p);

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
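
/* Example scenario (editor's note): an immutable constructor in, say,
 * generation 2 ends up pointing at an object we failed to promote out
 * of generation 0 (failed_to_evac).  The constructor has no mutable
 * link field of its own, so we allocate a MUT_CONS in generation 2
 * that points to it:
 *
 *     mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *
 * which puts the old-to-new pointer where the next GC will find it
 * (see the failed_to_evac handling at the end of scavenge()).
 */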
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;

/* -----------------------------------------------------------------------------
   evacuate()

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

     if  M >= evac_gen
         if  M > N      do nothing
         else           evac to step->to

     if  M < evac_gen   evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

     if  M >= evac_gen  do nothing
     if  M < evac_gen   set failed_to_evac flag to indicate that we
                        didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
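
/* Decision sketch (editor's addition) for a heap-allocated object in
 * generation M, collecting up to N with target evac_gen:
 *
 *     if (M > N) {                       // too old to move this GC
 *         if (M < evac_gen)              // ...but younger than desired
 *             failed_to_evac = rtsTrue;
 *         return q;
 *     }
 *     step = Bdescr((P_)q)->step->to;    // normal destination
 *     // copy() promotes further if step->gen->no < evac_gen
 *     return copy(q, size, step);
 */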
static StgClosure *
evacuate(StgClosure *q)
  const StgInfoTable *info;

  if (HEAP_ALLOCED(q)) {
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();

    step = bd->step->to;
  }
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  if (info->type == RBH) {
    info = REVERT_INFOPTR(info);
    belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
          q, info_type(q), info, info_type_by_ip(info)));

  switch (info->type) {

      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);

      /* just copy the block */
      to = copy(q,size,step);

    ASSERT(q->header.info != &MUT_CONS_info);

    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);

    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {

    return copy(q,sizeofW(StgHeader)+2,step);

    return copy(q,sizeofW(StgHeader)+2,step);

  case IND_OLDGEN_PERM:
    return copy(q,sizeW_fromITBL(info),step);

  case SE_CAF_BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);

  case THUNK_SELECTOR:
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */

      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;

        selectee = ((StgCAF *)selectee)->value;

        selectee = ((StgEvacuated *)selectee)->evacuee;

      case THUNK_SELECTOR:
        /* aargh - do recursively???? */

      case SE_CAF_BLACKHOLE:
        /* not evaluated yet */

        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));

      return copy(q,THUNK_SELECTOR_sizeW(),step);

    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */

    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);

      return copy(q,size,step);

    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();

    return ((StgEvacuated*)q)->evacuee;

      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);

      /* just copy the block */
      return copy(q,size,step);

  case MUT_ARR_PTRS_FROZEN:
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);

      /* just copy the block */
      to = copy(q,size,step);
      if (info->type == MUT_ARR_PTRS) {
        recordMutable((StgMutClosure *)to);

      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;

  case RBH: // cf. BLACKHOLE_BQ
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      belch("@@ evacuate: RBH %p (%s) to %p (%s)",
            q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));

    barf("evacuate: strange closure type %d", (int)(info->type));

/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
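
/* Worked example (editor's note): when a TSO is copied from src to
 * dest, every pointer into the old stack must be shifted by the same
 * word offset:
 *
 *     diff = (StgPtr)dest->sp - (StgPtr)src->sp;   // in words
 *     su->link = (StgUpdateFrame *)((StgPtr)su->link + diff);
 *
 * e.g. a frame link that pointed 100 words into src's stack now points
 * 100 words into dest's stack.  The loop below applies this shift to
 * each frame on the update-frame chain.
 */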
//@cindex relocate_TSO

relocate_TSO(StgTSO *src, StgTSO *dest)
  diff = (StgPtr)dest->sp - (StgPtr)src->sp;   /* In *words* */

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);

      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);

      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);

      barf("relocate_TSO %d", (int)(get_itbl(su)->type));

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

scavenge_srt(const StgInfoTable *info)
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
     */
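    /* Concrete example (editor's note): an SRT entry holding 0x00403001
     * has bit 0 set, so the DLL indirection cell lives at 0x00403000;
     * that cell contains the real closure address, hence the double
     * dereference:
     *
     *     evacuate(*(StgClosure **)((unsigned long)(*srt) & ~0x1));
     */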
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);

  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
  const StgInfoTable *info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects.
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type == RBH)
      info = REVERT_INFOPTR(info);

    switch (info->type) {

        StgBCO* bco = (StgBCO *)p;

        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));

        p += bco_sizeW(bco);

      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
        StgMVar *mvar = ((StgMVar *)p);

        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;

      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);

        p += info->layout.payload.nptrs;

      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);

    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);

      p += sizeofW(StgIndOldGen);

        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);

        caf->mut_link = NULL;

        p += sizeofW(StgCAF);

        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);

        caf->mut_link = NULL;

        p += sizeofW(StgCAF);

      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;

      p += sizeofW(StgMutVar);

    case SE_CAF_BLACKHOLE:
      p += BLACKHOLE_sizeW();

        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);

        p += BLACKHOLE_sizeW();

    case THUNK_SELECTOR:
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();

      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_NOCAF_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);

      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);

      /* follow everything */
      evac_gen = 0;		/* repeatedly mutable */
      next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
      for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);

      evac_gen = saved_evac_gen;

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);

        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;

        StgTSO *tso = (StgTSO *)p;

        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);

    case RBH: // cf. BLACKHOLE_BQ
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);

        belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
              p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();

        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);

        belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
              bf, info_type((StgClosure *)bf),
              bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);

        belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
              p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMe);
        break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);

        belch("@@ scavenge: %p (%s) exciting, isn't it",
              p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);

      barf("scavenge: unimplemented/strange closure type %d @ %p",

      barf("scavenge: unimplemented/strange closure type %d @ %p",

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
  const StgInfoTable *info;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  if (info->type == RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info->type) {

  case FUN_1_0:			/* hardly worth specialising these guys */

  case IND_OLDGEN_PERM:
      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);

  case SE_CAF_BLACKHOLE:

  case THUNK_SELECTOR:
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);

  case AP_UPD: /* same as PAPs */
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);

    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */

    barf("scavenge_one: strange object %d", (int)(info->type));

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
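
/* Outline sketch (editor's addition): both mutable-list walks below
 * have the same general shape:
 *
 *     for (p = list; p != END_MUT_LIST; p = next) {
 *         next = p->mut_link;
 *         ...scavenge the object at p...
 *         if (still mutable, or failed_to_evac) {
 *             p->mut_link = gen->mut_list;   // keep it on the list
 *             gen->mut_list = p;
 *         }                                  // otherwise drop it
 *     }
 */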
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type == RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;

        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;

          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);

          size = gen->steps[0].scan - start;

        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;

      /* the mut_link field of an IND_STATIC is overloaded as the
       * static link field too (it just so happens that we don't need
       * both at the same time), so we need to NULL it out when
       * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */

      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;

        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;

        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;

      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));

  gen->mut_once_list = new_list;
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type == RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);

        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;

      /* follow everything */
      p->mut_link = gen->mut_list;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
      /* MUT_CONSs are kept on the mut_once_list, not here, so we
       * should only see genuine MUT_VARs on this list.
       */
      ASSERT(p->header.info != &MUT_CONS_info);
2538 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2539 p->mut_link = gen->mut_list;
    case MVAR:
      {
	StgMVar *mvar = (StgMVar *)p;
	(StgClosure *)mvar->head  = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail  = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
2582 case IND_OLDGEN_PERM:
2583 /* Try to pull the indirectee into this generation, so we can
2584 * remove the indirection from the mutable list.
2587 ((StgIndOldGen *)p)->indirectee =
2588 evacuate(((StgIndOldGen *)p)->indirectee);
2591 if (failed_to_evac) {
2592 failed_to_evac = rtsFalse;
2593 p->mut_link = gen->mut_once_list;
2594 gen->mut_once_list = p;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// char str[80];
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info->type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }
    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
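
/* Illustrative sketch (not RTS code): the list surgery performed above.
 * Objects are popped off one intrusive list (static_objects) and consed
 * onto another (scavenged_static_objects) through the same link field,
 * which is why the loop can keep running while evacuation pushes new
 * entries onto static_objects.  Hypothetical plain-C stand-in types:
 */
#if 0
struct sobj { struct sobj *static_link; };

static void
move_all(struct sobj **from, struct sobj **to, struct sobj *end)
{
    while (*from != end) {
        struct sobj *p = *from;
        *from = p->static_link;   /* pop off the pending list */
        p->static_link = *to;     /* cons onto the done list */
        *to = p;
    }
}
#endif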
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgPtr q;
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    /* bitmap exhausted: the rest of this chunk is all pointers */
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
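
/* Illustrative sketch (not RTS code): the small-bitmap walk used above.
 * Bits are examined LSB first, and a 0 bit means "this word is a live
 * pointer" (a 1 bit marks a non-pointer word to skip).  The helper and
 * its visit() callback are hypothetical stand-ins using plain C types;
 * the real code walks StgPtr slots and calls evacuate().
 */
#if 0
static void
walk_small_bitmap(unsigned long *payload, unsigned long bitmap,
                  void (*visit)(unsigned long *slot))
{
    while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
            visit(payload);     /* 0 bit: pointer slot, follow it */
        }
        payload++;              /* 1 bit: non-pointer word, skip */
        bitmap >>= 1;
    }
}
#endif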
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;
  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
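
/* Illustrative sketch (not RTS code): why the loop above re-reads
 * step->new_large_objects on every iteration.  The list behaves as a
 * stack: scavenging pops from the front, while evacuate() may push
 * freshly evacuated large objects back onto the front, and the loop
 * keeps going until the stack is empty.  Hypothetical stand-in types:
 */
#if 0
struct blk { struct blk *link; };

static void
drain(struct blk **pending, void (*scav)(struct blk *))
{
    while (*pending != NULL) {
        struct blk *bd = *pending;
        *pending = bd->link;   /* pop */
        scav(bd);              /* may push new blocks onto *pending */
    }
}
#endif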
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }
  enteredCAFs = END_CAF_LIST;
}
//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF *next = caf->link;

    /* a CAF is live iff isAlive() finds its new address */
    StgCAF *new = (StgCAF*)isAlive((StgClosure*)caf);
    if (new) {
      /* still alive: keep it on the enteredCAFs list */
      new->link = enteredCAFs;
      enteredCAFs = new;
    } else {
      /* dead: revert it, poisoning the fields to catch stray uses */
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value = (StgClosure*)0xdeadbeef;
      caf->link  = (StgCAF*)0xdeadbeef;
    }
    caf = next;
  }
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i  = 0;
  p  = caf_list;
  pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      /* this CAF is dead: black-hole it and unlink it from the list */
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole: strange activation record found on stack");
    }
  }
}
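
/* Illustrative sketch (not RTS code): the early exit above relies on an
 * invariant - frames are black-holed from the top of the stack
 * downwards, so the first already-black-holed updatee proves that every
 * older update frame below it was handled on a previous pause.  A
 * hypothetical walk over a linked chain of frames:
 */
#if 0
struct frame { struct frame *link; int blackholed; };

static void
blackhole_chain(struct frame *f)
{
    for (; f != NULL; f = f->link) {
        if (f->blackholed)
            return;            /* everything below is already done */
        f->blackholed = 1;     /* cf. SET_INFO(bh,&BLACKHOLE_info) */
    }
}
#endif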
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;	/* Temporally next */
  StgUpdateFrame *prev_frame;	/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &BLACKHOLE_info)
	bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement))

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement; /* cast-as-lvalue: old gcc extension */
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
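
/* Illustrative sketch (not RTS code): the "slide" step above is just an
 * overlapping upward copy.  Word w[i] moves to w[i+displacement]; copying
 * from the top down is safe because each destination lies strictly above
 * every source word still to be read.  Hypothetical stand-in:
 */
#if 0
static void
slide_words(unsigned long *sp, unsigned long *bottom,
            unsigned long displacement)
{
    while (sp >= bottom) {
        sp[displacement] = *sp; /* dest is above src, never clobbers */
        sp--;
    }
}
#endif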
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
//@node Index, , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect:: @cindex\s-+GarbageCollect
//* MarkRoot:: @cindex\s-+MarkRoot
//* RevertCAFs:: @cindex\s-+RevertCAFs
//* addBlock:: @cindex\s-+addBlock
//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
//* copy:: @cindex\s-+copy
//* copyPart:: @cindex\s-+copyPart
//* evacuate:: @cindex\s-+evacuate
//* evacuate_large:: @cindex\s-+evacuate_large
//* gcCAFs:: @cindex\s-+gcCAFs
//* isAlive:: @cindex\s-+isAlive
//* maybeLarge:: @cindex\s-+maybeLarge
//* mkMutCons:: @cindex\s-+mkMutCons
//* printMutOnceList:: @cindex\s-+printMutOnceList
//* printMutableList:: @cindex\s-+printMutableList
//* relocate_TSO:: @cindex\s-+relocate_TSO
//* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs
//* scavenge:: @cindex\s-+scavenge
//* scavenge_large:: @cindex\s-+scavenge_large
//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
//* scavenge_one:: @cindex\s-+scavenge_one
//* scavenge_srt:: @cindex\s-+scavenge_srt
//* scavenge_stack:: @cindex\s-+scavenge_stack
//* scavenge_static:: @cindex\s-+scavenge_static
//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
//* threadPaused:: @cindex\s-+threadPaused
//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee:: @cindex\s-+upd_evacuee
//* zero_mutable_list:: @cindex\s-+zero_mutable_list
//* zero_static_object_list:: @cindex\s-+zero_static_object_list
//@end index