/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.71 2000/01/14 14:55:03 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
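
/* A minimal sketch (illustrative only, not part of the RTS) of how the
 * static link field threads the list described above.  Objects are
 * consed onto the front, and the end of the list is the non-NULL
 * sentinel END_OF_STATIC_LIST rather than NULL, so a non-zero link
 * field means "already on the list".  The helper name is hypothetical.
 */
#if 0
static void push_static_sketch (StgClosure *c)
{
  const StgInfoTable *info = get_itbl(c);
  /* only push if the object is not already on a list */
  if (STATIC_LINK(info,c) == NULL) {
    STATIC_LINK(info,c) = static_objects;   /* cons onto the front */
    static_objects = c;
  }
}
#endif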

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	  /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */

//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );
static void         revert_dead_CAFs        ( void );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
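
/* A compact restatement of the rule above as illustrative pseudocode
 * only (the real destination is precomputed in step->to and applied in
 * evacuate()/copy() below); for an object in generation "gen", step "s":
 *
 *   if (there is a step s+1 in gen)       dest = to-space of (gen, s+1)
 *   else if (gen == N && gen+1 exists)    dest = to-space of (gen+1, 0)
 *   else if (gen == N)                    dest = to-space of (gen, s)
 *   else                                  dest = to-space of (gen+1, 0)
 */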
//@cindex GarbageCollect

void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen  = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp    = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space  = bd;
      step->to_blocks = 1;
      step->scan    = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;	/* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

#if defined(PAR)
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);
  markHugsObjects();

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    {
      int gen;
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
        for (st = generations[gen].n_steps-1; st >= 0; st--) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          step = &generations[gen].steps[st];
          evac_gen = gen;
          if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
            scavenge(step);
            flag = rtsTrue;
          }
          if (step->new_large_objects != NULL) {
            scavenge_large(step);
            flag = rtsTrue;
          }
        }
      }
    }

    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;	/* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------------
         *                 (number of generations - 1)
         *
         * where G is the generation number.
         */
        if (RtsFlags.GcFlags.generations > 1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
               / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }
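
        /* Worked example (illustrative numbers only): with 5 generations
         * and oldest_gen->max_blocks = 1000, generation 2 would get
         * 1000 * 2 / (5-1) = 500 blocks.
         */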

      }
      /* for older generations... */
      else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }

  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;	/* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
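
    /* Worked example (illustrative numbers only): with maxHeapSize =
     * 1000 blocks, oldGenFactor = 2 and a copied nursery of blocks =
     * 450, we are near the limit (450 * 2 * 2 > 1000), so
     * adjusted_blocks = 1000 - 2*450 = 100 and pc_free = 100 * 100 /
     * 1000 = 10; if pcFreeHeap were 20 this would be a heapOverflow(),
     * otherwise the nursery shrinks to 100 blocks.
     */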

  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
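
  /* Worked example for the formula above (illustrative numbers only):
   * with heapSizeSuggestion = 1000 blocks, needed = 200 and
   * g0s0_pcnt_kept = 30, we get blocks = (1000 - 200) * 100 / 130 =
   * 615, i.e. roughly (suggested - needed) / (1 + 0.30).
   */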

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  RebuildGAtables(major_gc);
#endif

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

#ifdef PROFILING
  /* restore enclosing cost centre */
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}

//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }
    weak_done = rtsTrue;
  }

  return flag;
}

/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

 loop:

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

#if 1 || !defined(PAR)
  /* ignore closures in generations that we're not collecting. */
  /* In GUM we use this routine when rebuilding GA tables; for some
     reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }
#endif

  info = get_itbl(p);

  switch (info->type) {

  case IND:
  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  default:
    /* dead */
    return NULL;
  }
}

StgClosure *
MarkRoot(StgClosure *root)
{
  //if (root != END_TSO_QUEUE)
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;

  step->to_blocks++;
  new_blocks++;
}

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}

/* -----------------------------------------------------------------------------
   Evacuation

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
             belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
                   q, info_type(q), info, info_type_by_ip(info)));
  }

  switch (info->type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_STATIC:
        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case SE_CAF_BLACKHOLE:
      case BLACKHOLE:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);

  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
       tagging and all. */
    return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}

/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( stgCast(unsigned long,*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info->type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;		/* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info->type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case THUNK:
  case CONSTR:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

#ifdef DEBUG
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}

//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen->no;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
          gen->mut_list = p;
        }
        continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      IF_DEBUG(gc,
               belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
                     p, ((StgMutVar *)p)->var, p->mut_link));

      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MVAR %p; head: %p; tail: %p; value: %p ; next: %p",
                       mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));

        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        scavengeTSO(tso);

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
                       p, p->mut_link));

        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_once_list;
        gen->mut_once_list = p;
      } else {
        ((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;

      // HWL: old PAR code deleted here

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}

//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {
2533 StgInd *ind = (StgInd *)p;
2534 ind->indirectee = evacuate(ind->indirectee);
2536 /* might fail to evacuate it, in which case we have to pop it
2537 * back on the mutable list (and take it off the
2538 * scavenged_static list because the static link and mut link
2539 * pointers are one and the same).
2541 if (failed_to_evac) {
2542 failed_to_evac = rtsFalse;
2543 scavenged_static_objects = STATIC_LINK(info,p);
2544 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2545 oldest_gen->mut_once_list = (StgMutClosure *)ind;
	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);

      barf("scavenge_static");

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

scavenge_stack(StgPtr p, StgPtr stack_end)
  const StgInfoTable* info;

  IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC(stgCast(StgClosure*,q)));

      /* otherwise, must be a pointer into the allocation space. */
      (StgClosure *)*p = evacuate((StgClosure *)q);

      /*
       * Otherwise, q must be the info pointer of an activation
       * record.  All activation records have 'bitmap' style layout
       * info.
       */
      info = get_itbl((StgClosure *)p);
    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
      bitmap = ((StgRetDyn *)p)->liveness;
      p = (P_)&((StgRetDyn *)p)->payload[0];

      /* probably a slow-entry point return address: */
	      belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		    old_p, p, old_p+1));
      p++; /* what if FHS != 1 !? -- HWL */
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
	StgUpdateFrame *frame = (StgUpdateFrame *)p;

	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);

	  bdescr *bd = Bdescr((P_)frame->updatee);

	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;

	  /* Don't promote blackholes (see the note after this case) */
	  if (!(step->gen->no == 0 &&
		step->no == step->gen->n_steps-1)) {

	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;

	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);

	  /* will never be SE_{,CAF_}BLACKHOLE, since we
	     don't push an update frame for single-entry thunks.  KSW 1999-01. */
	  barf("scavenge_stack: UPDATE_FRAME updatee");
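      /* Descriptive note on the "don't promote blackholes" test above:
       * a blackholed updatee is about to be overwritten with its value
       * when the update frame is executed, so copying it into an older
       * generation would mostly manufacture instant old-generation
       * garbage (plus mutable-list traffic); kept young, it dies
       * cheaply at the next minor collection.  This is also why
       * copyPart above copies only the header: a blackhole's payload
       * is dead space.
       */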
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine);
       * see the worked example after this function */

      // StgPtr old_p = p; // debugging only -- HWL
      /* stack frames like these are ordinary closures and therefore may
	 contain setup-specific fixed-header words (as in GranSim!);
	 therefore, these cases should not use p++ but &(p->payload) -- HWL */
      // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p)));
      bitmap = info->layout.bitmap;
      p = (StgPtr)&(((StgClosure *)p)->payload);
      // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1));

      bitmap = info->layout.bitmap;

      /* this assumes that the payload starts immediately after the info-ptr */
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);

	bitmap = bitmap >> 1;
      /* large bitmap (> 32 entries) */

	StgLargeBitmap *large_bitmap;

	large_bitmap = info->layout.large_bitmap;

	for (i = 0; i < large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);

	    bitmap = bitmap >> 1;

	  if (i+1 < large_bitmap->size) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);

      /* and don't forget to follow the SRT */

      barf("scavenge_stack: weird activation record found on stack.\n");
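/* Worked example of the bitmap convention used in the small- and
 * large-bitmap cases above (illustrative note): a 0 bit marks a
 * pointer word and a 1 bit a non-pointer word, with bit 0 describing
 * the word at the lowest address.  For bitmap 0x5 (binary 101) over
 * three stack words:
 *
 *     word 0: bit = 1  ->  non-pointer, skipped
 *     word 1: bit = 0  ->  pointer, evacuated
 *     word 2: bit = 1  ->  non-pointer, skipped
 *
 * which is exactly what the ((bitmap & 1) == 0) tests compute.
 */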
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

scavenge_large(step *step)
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0; /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating (see the note after this function).
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* only certain objects can be "large"... */

      /* nothing to follow */

      /* follow everything */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);

	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);

	StgBCO* bco = stgCast(StgBCO*,p);

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));

      scavengeTSO((StgTSO *)p);
      // HWL: old PAR code deleted here

      barf("scavenge_large: unknown/strange object");
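/* Note on the loop structure above: the for-loop deliberately
 * re-reads step->new_large_objects on every iteration instead of
 * following bd->link, because evacuate() may push freshly evacuated
 * large objects onto the front of that list while we are scavenging.
 * The list therefore behaves as a work stack, exactly as the comment
 * at the top of the loop says.
 */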
//@cindex zero_static_object_list

zero_static_object_list(StgClosure* first_static)
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    link = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = NULL;

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list (see the example after this function).
 */
//@cindex zero_mutable_list

zero_mutable_list( StgMutClosure *first )
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
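/* Example of the field overlap described above (illustrative): an
 * IND_STATIC that has been updated sits on some generation's
 * mut_once_list, threaded through its mut_link field.  A major GC then
 * wants to use that very same word as the closure's static link, and a
 * stale, non-zero link would make the object appear to be on a static
 * list already.  Zeroing the mutable lists first gives the static-link
 * pass a clean slate.
 */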
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);

  enteredCAFs = END_CAF_LIST;
//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)
    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {

	new = (StgCAF*)isAlive((StgClosure*)caf);

	   new->link = enteredCAFs;

	   SET_INFO(caf,&CAF_UNENTERED_info);
	   caf->value = (StgClosure*)0xdeadbeef;
	   caf->link  = (StgCAF*)0xdeadbeef;
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

  const StgInfoTable *info;

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);

      pp = &STATIC_LINK2(info,p);

  /*  fprintf(stderr, "%d CAFs live\n", i); */
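/* A sketch (not compiled) of the list surgery the fragments above
 * perform.  It assumes a DEBUG-only caf_list head, which is how the
 * CAF list is kept in this era of the RTS: dead CAFs are blackholed
 * and unlinked with a pointer-to-pointer walk, live ones just advance
 * the cursor.
 */
#if 0
  StgClosure  *p  = caf_list;     /* assumed head of the DEBUG CAF list */
  StgClosure **pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);
    if (STATIC_LINK(info,p) == NULL) {   /* not reached by this GC: dead */
      SET_INFO(p,&BLACKHOLE_info);       /* entering it now reports an error */
      p = STATIC_LINK2(info,p);
      *pp = p;                           /* unlink the dead CAF */
    } else {
      pp = &STATIC_LINK2(info,p);        /* keep it; advance the cursor */
      p = *pp;
    }
  }
#endif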
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

threadLazyBlackHole(StgTSO *tso)
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

    switch (get_itbl(update_frame)->type) {

      update_frame = stgCast(StgCatchFrame*,update_frame)->link;

      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);

      update_frame = update_frame->link;

      update_frame = stgCast(StgSeqFrame*,update_frame)->link;

      barf("threadLazyBlackHole");
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

threadSqueezeStack(StgTSO *tso)
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;	/* Temporally next */
  StgUpdateFrame *prev_frame;	/* Temporally previous */

  rtsBool prev_was_update_frame;

  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;

  bottom = &(tso->stack[tso->stack_size]);

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  /* bottom - sizeofW(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;

	     if (!(frame >= top_frame && frame <= (StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",

	     switch (get_itbl(frame)->type) {
	     case UPDATE_FRAME: upd_frames++;
	       if (frame->updatee->header.info == &BLACKHOLE_info)
	     case STOP_FRAME:  stop_frames++;
	     case CATCH_FRAME: catch_frames++;
	     case SEQ_FRAME:   seq_frames++;
	       barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	       printObj((StgClosure *)prev_frame);

    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above.  We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze.  When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs;
   * a worked example follows this comment.
   */
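  /* Worked example (illustrative).  With the stack growing downwards:
   *
   *      ...                              ...
   *      UPDATE_FRAME (updatee = A)       UPDATE_FRAME (updatee = A)
   *      UPDATE_FRAME (updatee = B)  ==>  <frame squeezed out>
   *      ...                              ...
   *
   * This only fires when the two frames are adjacent, i.e. the lower
   * frame is "empty" (no pending arguments between them).  The doomed
   * frame's updatee B is overwritten with an indirection to A (the
   * UPD_IND_NOLOCK below), so both thunks end up updated with the
   * same value, and one update frame's worth of stack is reclaimed.
   */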
  while (frame != NULL) {
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     * 1. both the previous and current frame are update frames
     * 2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
# error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info

	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;

	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;

    /* Now slide all words from sp up to the next frame (see the worked
     * example after this function) */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;

    (P_)prev_frame = (P_)frame + displacement;

  tso->sp += displacement;
  tso->su = prev_frame;

	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
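/* Worked example of the slide above (illustrative): if two update
 * frames of sizeofW(StgUpdateFrame) words each were squeezed out below
 * a run of live stack, displacement is 2*sizeofW(StgUpdateFrame), and
 * the copy loop moves every remaining word that many words towards the
 * stack bottom (sp[displacement] = *sp, with sp walking downwards so
 * that words are copied highest-first and never overwrite data that
 * has not yet been read).  Finally tso->sp is bumped by the same
 * amount.
 */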
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

threadPaused(StgTSO *tso)
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

//@cindex printMutOnceList
printMutOnceList(generation *gen)
  StgMutClosure *p;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (p = gen->mut_once_list; p != END_MUT_LIST; p = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);

//@cindex printMutableList
printMutableList(generation *gen)
  StgMutClosure *p;

  fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
  for (p = gen->saved_mut_list; p != END_MUT_LIST; p = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
//@node Index,  , Pausing a thread
//@subsection Index

//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* mkMutCons::  @cindex\s-+mkMutCons
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* revert_dead_CAFs::  @cindex\s-+revert_dead_CAFs
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list