/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.69 2000/01/13 14:34:02 hwloidl Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
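
/* A minimal sketch (illustration only, not part of the collector) of
 * how these lists are traversed: objects are consed onto the front via
 * their static link field, and the walk stops at END_OF_STATIC_LIST,
 * the '1' sentinel described above.  STATIC_LINK and get_itbl are the
 * same macros used later in this file.
 */
#if 0
static void walk_static_list( StgClosure *list )
{
  StgClosure *p;
  for (p = list; p != END_OF_STATIC_LIST; p = STATIC_LINK(get_itbl(p),p)) {
    /* visit p, a live static object */
  }
}
#endif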
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */

//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );
static void         revert_dead_CAFs        ( void );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
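
/* A concrete reading of the evacuation rules above (illustration only),
 * for a heap with two generations of two steps each, collecting N = 0:
 *
 *   - an object in gen 0, step 0 goes to to-space in gen 0, step 1;
 *   - an object in gen 0, step 1 (the last step of generation N) goes
 *     to to-space in gen 1, step 0;
 *   - objects in gen 1 are not moved at all; their mutable lists are
 *     scavenged as roots instead.
 */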
//@cindex GarbageCollect

void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int gen, st;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                     CurrentTime[CurrentProc], CurrentTime[CurrentProc]));
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        step->blocks = bd;
        step->n_blocks = 1;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     if (CurrentTSO) {
       CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
     }
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
        step = &generations[gen].steps[st];
        evac_gen = gen;
        if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
          scavenge(step);
          flag = rtsTrue;
        }
        if (step->new_large_objects != NULL) {
          scavenge_large(step);
          flag = rtsTrue;
        }
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large_objects, having been moved during garbage
         * collection from large_objects.  Any objects left on the
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------
         *                   (n_generations - 1)
         *
         * where G is the generation number.
         */
        if (RtsFlags.GcFlags.generations > 1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
               / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }

      /* for older generations... */
      } else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0; /* now from-space */
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
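    /* Where the 3L figure comes from: during the collection we hold the
     * nursery (which is L, being the same size as the live data), the
     * live data in from-space (L), and its copy in to-space (L) all at
     * once.  Shrinking the nursery squeezes the first of those terms,
     * taking the total down towards 2L.
     */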
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {

    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded(); /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
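      /* A worked example with illustrative numbers: for a suggestion of
       * 1000 blocks, needed = 200 and g0s0_pcnt_kept = 60, we get
       * (1000 - 200) * 100 / (100 + 60) = 500 blocks.  At the next GC
       * that means roughly 500 (allocation area) + 300 (the 60% of g0s0
       * we expect to copy) + 200 (needed) = 1000 blocks, the suggestion.
       */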
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  resetNurseries();

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  RebuildGAtables(major_gc);
#endif

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}

//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
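
/* A sketch of how the scavenge loop in GarbageCollect drives this
 * function (illustration only).  The call must come last, after all
 * other scavenging in the round is done:
 *
 *   loop:
 *     ... scavenge to-spaces until no more work ...
 *     if (traverse_weak_ptr_list()) goto loop;
 *     cleanup_weak_ptr_list(&weak_ptr_list);
 *
 * Each round may discover newly-reachable keys, whose values and
 * finalizers then create more scavenging work; only when a round does
 * no work at all can the remaining weak pointers be declared dead.
 */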
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }
    weak_done = rtsTrue;
  }

  return flag;
}

/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
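
/* Typical use, as in traverse_weak_ptr_list above (illustration only):
 * a weak pointer's key is tested without being treated as a root, and
 * the returned address replaces the stale one when the key turns out
 * to be alive; only then are the value and finalizer evacuated:
 *
 *   if ((new = isAlive(w->key))) {
 *     w->key   = new;
 *     w->value = evacuate(w->value);
 *   }
 */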
//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

#if 1 || !defined(PAR)
    /* ignore closures in generations that we're not collecting. */
    /* In GUM we use this routine when rebuilding GA tables; for some
       reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }
#endif

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            /* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    default:
      /* dead. */
      return NULL;
    }
  }
}

//@cindex MarkRoot

StgClosure *
MarkRoot(StgClosure *root)
{
  //if (root != END_TSO_QUEUE)
  return evacuate(root);
}

//@cindex MarkRootHWL

StgClosure *
MarkRootHWL(StgClosure *root)
{
  StgClosure *new = evacuate(root);
  upd_evacuee(root, new);
  return new;
}

//@cindex addBlock

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;

  step->to_blocks++;
  new_blocks++;
}

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

//@cindex copy

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
//@cindex copyPart

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static inline void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
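
/* How this gets used (illustration only): scavenge() below calls
 * mkMutCons when it fails to promote everything reachable from an
 * object q that now lives in generation evac_gen:
 *
 *   if (failed_to_evac) {
 *     mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *     failed_to_evac = rtsFalse;
 *   }
 */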
//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
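
/* The conditions above, re-expressed as a sketch in code (illustration
 * only; the real logic is inlined in evacuate() and copy() below, and
 * a NULL here stands for "do nothing"):
 */
#if 0
static step *choose_dest( bdescr *bd )   /* the object resides in bd */
{
  nat M = bd->gen->no;

  if (M >= evac_gen) {
    if (M > N) return NULL;                  /* do nothing */
    return bd->step->to;                     /* evac to step->to */
  }
  return &generations[evac_gen].steps[0];    /* evac to evac_gen, step 0 */
}
#endif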
//@cindex evacuate

StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;
  nat size, ptrs, nonptrs, vhs;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        /* nope */
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
             belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
                   q, info_type(q), info, info_type_by_ip(info)));
  }

  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);

  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
       tagging and all. */
    return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
        return to;
      }
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      return dest;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
  }

  return dest;
}

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( stgCast(unsigned long,*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    /* if it's a RBH, look at the orig closure */
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;
        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  /* if it's a RBH, look at the orig closure */
  if (info->type==RBH)
    info = REVERT_INFOPTR(info);

  switch (info -> type) {

  case FUN:
  case FUN_1_0:                 /* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    /* if it's a RBH, look at the orig closure */
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    /* if it's a RBH, look at the orig closure */
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen->no;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
          gen->mut_list = p;
        }
        continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      IF_DEBUG(gc,
               belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
                     p, ((StgMutVar *)p)->var, p->mut_link));

      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MVAR %p; head: %p; tail: %p; value: %p ; next: %p",
                       mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));

        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        scavengeTSO(tso);

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
                       p, p->mut_link));

        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_once_list;
        gen->mut_once_list = p;
      } else {
        ((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;

      // HWL: old PAR code deleted here

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
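
/* scavenge_static() deliberately re-reads the global static_objects at
 * the end of every iteration: the evacuate() calls above may discover
 * further static closures and cons them onto that list mid-scan, so it
 * behaves as a growing worklist.  The same pattern in miniature, with
 * a hypothetical helper name (illustrative only):
 */
#if 0
static void
drain_static_worklist( void )
{
  StgClosure *p = static_objects;
  while (p != END_OF_STATIC_LIST) {
    static_objects = STATIC_LINK(get_itbl(p), p); /* pop the head      */
    /* ... scavenging p may push new entries ... */
    p = static_objects;                           /* pick up new work  */
  }
}
#endif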
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space. */
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
	StgPtr old_p = p;   // debugging only -- HWL
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
	p++; /* what if FHS!=1 !? -- HWL */
	continue;
      }

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }

      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      {
	StgPtr old_p = p; // debugging only -- HWL
	/* stack frames like these are ordinary closures and therefore may
	   contain setup-specific fixed-header words (as in GranSim!);
	   therefore, these cases should not use p++ but &(p->payload) -- HWL */
	IF_DEBUG(gran, IF_DEBUG(sanity, printObj((StgClosure *)p)));
	bitmap = info->layout.bitmap;
	p = (StgPtr)&(((StgClosure *)p)->payload);
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
	goto small_bitmap;
      }

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }
      break;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	break;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
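
/* The bitmap convention used by the small- and large-bitmap cases
 * above: a *clear* bit marks a pointer word (evacuate it), a *set*
 * bit marks a non-pointer word (skip it).  A sketch of the inner
 * loop, with a hypothetical helper name:
 */
#if 0
static StgPtr
scavenge_small_bitmap( StgPtr p, StgWord32 bitmap )
{
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) {            /* 0 => live pointer */
      (StgClosure *)*p = evacuate((StgClosure *)*p);
    }
    p++;
    bitmap = bitmap >> 1;
  }
  return p;
}
#endif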
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }

    case BCO:
      {
	StgBCO* bco = stgCast(StgBCO*,p);
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      // HWL: old PAR code deleted here
      continue;

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
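
/* Why the loop above re-reads step->new_large_objects each time round:
 * evacuation pushes blocks onto the *front* of that list, so it behaves
 * as a stack, and large objects evacuated during scavenging are picked
 * up on a later iteration.  The push, sketched with hypothetical names:
 */
#if 0
static void
push_new_large_object( step *stp, bdescr *bd )
{
  bd->link = stp->new_large_objects;	/* cons onto the front...      */
  stp->new_large_objects = bd;		/* ...making the list a stack  */
}
#endif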
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure *p, *link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
  enteredCAFs = END_CAF_LIST;
}

//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)
{
    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {
       StgCAF *next = caf->link;
       StgCAF *new  = (StgCAF*)isAlive((StgClosure*)caf);

       if (new) {
	  /* still alive: keep it, at its possibly-forwarded address */
	  new->link = enteredCAFs;
	  enteredCAFs = new;
       } else {
	  /* dead: unenter it, so that any re-entry is caught */
	  SET_INFO(caf,&CAF_UNENTERED_info);
	  caf->value = (StgClosure*)0xdeadbeef;
	  caf->link  = (StgCAF*)0xdeadbeef;
       }
       caf = next;
    }
}
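
/* The loop above is the usual destructive list filter: detach the
 * whole enteredCAFs list, then cons the survivors -- those for which
 * isAlive() returns a (possibly forwarded) non-NULL address -- back
 * on.  The general shape, with hypothetical names (illustrative only):
 */
#if 0
static StgCAF *
filter_live_cafs( StgCAF *cafs )
{
  StgCAF *live = END_CAF_LIST;
  while (cafs != END_CAF_LIST) {
    StgCAF *next = cafs->link;
    StgCAF *fwd  = (StgCAF *)isAlive((StgClosure *)cafs);
    if (fwd != NULL) {          /* survivor, at its new address */
      fwd->link = live;
      live = fwd;
    }
    cafs = next;
  }
  return live;
}
#endif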
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
//@cindex gcCAFs

void
gcCAFs(void)
{
  StgClosure *p, **pp;
  const StgInfoTable *info;
  nat i = 0;

  p  = caf_list;
  pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
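
/* Intended usage, per the comment above: run gcCAFs() after each
 * major GC, once the collector has filled in the static link fields.
 * A CAF whose static link is still NULL at that point was not
 * reached, so entering it will now hit a BLACKHOLE rather than
 * silently using dead data.
 */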
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }
      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }
      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole");
    }
  }
}
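
/* Note on the early return above: a plain BLACKHOLE updatee can only
 * have been written by a previous run of this walk (or by the stack
 * squeezer below), both of which blackhole everything further down
 * the stack before stopping -- so finding one means the rest of the
 * stack is already done.  CAF blackholes are CAF_BLACKHOLEs precisely
 * so that they don't trigger this early exit.
 */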
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;			/* Temporally next */
  StgUpdateFrame *prev_frame;			/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame >= top_frame && frame <= (StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
                       if (frame->updatee->header.info == &BLACKHOLE_info)
			 bhs++;
                       break;
    case STOP_FRAME:   stop_frames++;
                       break;
    case CATCH_FRAME:  catch_frames++;
                       break;
    case SEQ_FRAME:    seq_frames++;
                       break;
    default:
      printObj((StgClosure *)prev_frame);
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */
  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;			/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;			/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement));

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames));
}
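
/* A note on the squeeze test in threadSqueezeStack() above:
 * frame_bottom is (P_)frame + sizeofW(StgUpdateFrame), so the test
 *
 *     (P_)prev_frame == frame_bottom + displacement
 *
 * says that, once the `displacement' words already squeezed out are
 * discounted, the temporally-previous frame sits flush against the
 * current one: the current update frame is empty, and can be bypassed
 * by turning its updatee into an indirection (UPD_IND_NOLOCK).
 */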
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (p = gen->mut_once_list; p != END_MUT_LIST; p = next) {
    next = p->mut_link;
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
  for (p = gen->saved_mut_list; p != END_MUT_LIST; p = next) {
    next = p->mut_link;
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
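
/* Example use of the two printers above (illustrative; assumes the
 * RTS's usual `generations' array and the generation count from
 * RtsFlags.GcFlags.generations).  NB: printMutableList() reads
 * saved_mut_list, so it is only meaningful during a collection.
 */
#if 0
static void
printAllMutableLists( void )
{
  nat g;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    printMutOnceList(&generations[g]);
    printMutableList(&generations[g]);
  }
}
#endif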
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* mkMutCons::  @cindex\s-+mkMutCons
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* revert_dead_CAFs::  @cindex\s-+revert_dead_CAFs
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index