1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.72 2000/01/22 18:00:03 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
12 //* STATIC OBJECT LIST::
13 //* Static function declarations::
19 //* Sanity code for CAF garbage collection::
20 //* Lazy black holing::
22 //* Pausing a thread::
26 //@node Includes, STATIC OBJECT LIST
27 //@subsection Includes
33 #include "StoragePriv.h"
36 #include "SchedAPI.h" /* for ReverCAFs prototype */
39 #include "BlockAlloc.h"
44 #include "StablePriv.h"
45 #if defined(GRAN) || defined(PAR)
46 # include "GranSimRts.h"
47 # include "ParallelRts.h"
51 # include "ParallelDebug.h"
57 //@node STATIC OBJECT LIST, Static function declarations, Includes
58 //@subsection STATIC OBJECT LIST
60 /* STATIC OBJECT LIST.
63 * We maintain a linked list of static objects that are still live.
64 * The requirements for this list are:
66 * - we need to scan the list while adding to it, in order to
67 * scavenge all the static objects (in the same way that
68 * breadth-first scavenging works for dynamic objects).
70 * - we need to be able to tell whether an object is already on
71 * the list, to break loops.
73 * Each static object has a "static link field", which we use for
74 * linking objects on to the list. We use a stack-type list, consing
75 * objects on the front as they are added (this means that the
76 * scavenge phase is depth-first, not breadth-first, but that
79 * A separate list is kept for objects that have been scavenged
80 * already - this is so that we can zero all the marks afterwards.
82 * An object is on the list if its static link field is non-zero; this
83 * means that we have to mark the end of the list with '1', not NULL.
85 * Extra notes for generational GC:
87 * Each generation has a static object list associated with it. When
88 * collecting generations up to N, we treat the static object lists
89 * from generations > N as roots.
91 * We build up a static object list while collecting generations 0..N,
92 * which is then appended to the static object list of generation N+1.
94 StgClosure* static_objects; /* live static objects */
95 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
97 /* N is the oldest generation being collected, where the generations
98 * are numbered starting at 0. A major GC (indicated by the major_gc
99 * flag) is when we're collecting all generations. We only attempt to
100 * deal with static objects and GC CAFs when doing a major GC.
103 static rtsBool major_gc;
105 /* Youngest generation that objects should be evacuated to in
106 * evacuate(). (Logically an argument to evacuate, but it's static
107 * a lot of the time so we optimise it into a global variable).
113 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
114 static rtsBool weak_done; /* all done for this pass */
116 /* Flag indicating failure to evacuate an object to the desired
119 static rtsBool failed_to_evac;
121 /* Old to-space (used for two-space collector only)
123 bdescr *old_to_space;
125 /* Data used for allocation area sizing.
127 lnat new_blocks; /* blocks allocated during this GC */
128 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
130 //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
131 //@subsection Static function declarations
133 /* -----------------------------------------------------------------------------
134 Static function declarations
135 -------------------------------------------------------------------------- */
137 static StgClosure * evacuate ( StgClosure *q );
138 static void zero_static_object_list ( StgClosure* first_static );
139 static void zero_mutable_list ( StgMutClosure *first );
140 static void revert_dead_CAFs ( void );
142 static rtsBool traverse_weak_ptr_list ( void );
143 static void cleanup_weak_ptr_list ( StgWeak **list );
145 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
146 static void scavenge_large ( step *step );
147 static void scavenge ( step *step );
148 static void scavenge_static ( void );
149 static void scavenge_mutable_list ( generation *g );
150 static void scavenge_mut_once_list ( generation *g );
153 static void gcCAFs ( void );
156 //@node Garbage Collect, Weak Pointers, Static function declarations
157 //@subsection Garbage Collect
159 /* -----------------------------------------------------------------------------
162 For garbage collecting generation N (and all younger generations):
164 - follow all pointers in the root set. the root set includes all
165 mutable objects in all steps in all generations.
167 - for each pointer, evacuate the object it points to into either
168 + to-space in the next higher step in that generation, if one exists,
169 + if the object's generation == N, then evacuate it to the next
170 generation if one exists, or else to-space in the current
172 + if the object's generation < N, then evacuate it to to-space
173 in the next generation.
175 - repeatedly scavenge to-space from each step in each generation
176 being collected until no more objects can be evacuated.
178 - free from-space in each step, and set from-space = to-space.
180 -------------------------------------------------------------------------- */
181 //@cindex GarbageCollect
183 void GarbageCollect(void (*get_roots)(void))
187 lnat live, allocated, collected = 0, copied = 0;
191 CostCentreStack *prev_CCS;
194 #if defined(DEBUG) && defined(GRAN)
195 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
199 /* tell the stats department that we've started a GC */
202 /* attribute any costs to CCS_GC */
208 /* Approximate how much we allocated */
209 allocated = calcAllocated();
211 /* Figure out which generation to collect
214 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
215 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
219 major_gc = (N == RtsFlags.GcFlags.generations-1);
221 /* check stack sanity *before* GC (ToDo: check all threads) */
223 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
225 IF_DEBUG(sanity, checkFreeListSanity());
227 /* Initialise the static object lists
229 static_objects = END_OF_STATIC_LIST;
230 scavenged_static_objects = END_OF_STATIC_LIST;
232 /* zero the mutable list for the oldest generation (see comment by
233 * zero_mutable_list below).
236 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
239 /* Save the old to-space if we're doing a two-space collection
241 if (RtsFlags.GcFlags.generations == 1) {
242 old_to_space = g0s0->to_space;
243 g0s0->to_space = NULL;
246 /* Keep a count of how many new blocks we allocated during this GC
247 * (used for resizing the allocation area, later).
251 /* Initialise to-space in all the generations/steps that we're
254 for (g = 0; g <= N; g++) {
255 generations[g].mut_once_list = END_MUT_LIST;
256 generations[g].mut_list = END_MUT_LIST;
258 for (s = 0; s < generations[g].n_steps; s++) {
260 /* generation 0, step 0 doesn't need to-space */
261 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
265 /* Get a free block for to-space. Extra blocks will be chained on
269 step = &generations[g].steps[s];
270 ASSERT(step->gen->no == g);
271 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
272 bd->gen = &generations[g];
275 bd->evacuated = 1; /* it's a to-space block */
276 step->hp = bd->start;
277 step->hpLim = step->hp + BLOCK_SIZE_W;
281 step->scan = bd->start;
283 step->new_large_objects = NULL;
284 step->scavenged_large_objects = NULL;
286 /* mark the large objects as not evacuated yet */
287 for (bd = step->large_objects; bd; bd = bd->link) {
293 /* make sure the older generations have at least one block to
294 * allocate into (this makes things easier for copy(), see below.
296 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
297 for (s = 0; s < generations[g].n_steps; s++) {
298 step = &generations[g].steps[s];
299 if (step->hp_bd == NULL) {
301 bd->gen = &generations[g];
304 bd->evacuated = 0; /* *not* a to-space block */
305 step->hp = bd->start;
306 step->hpLim = step->hp + BLOCK_SIZE_W;
312 /* Set the scan pointer for older generations: remember we
313 * still have to scavenge objects that have been promoted. */
314 step->scan = step->hp;
315 step->scan_bd = step->hp_bd;
316 step->to_space = NULL;
318 step->new_large_objects = NULL;
319 step->scavenged_large_objects = NULL;
323 /* -----------------------------------------------------------------------
324 * follow all the roots that we know about:
325 * - mutable lists from each generation > N
326 * we want to *scavenge* these roots, not evacuate them: they're not
327 * going to move in this GC.
328 * Also: do them in reverse generation order. This is because we
329 * often want to promote objects that are pointed to by older
330 * generations early, so we don't have to repeatedly copy them.
331 * Doing the generations in reverse order ensures that we don't end
332 * up in the situation where we want to evac an object to gen 3 and
333 * it has already been evaced to gen 2.
337 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
338 generations[g].saved_mut_list = generations[g].mut_list;
339 generations[g].mut_list = END_MUT_LIST;
342 /* Do the mut-once lists first */
343 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
344 IF_PAR_DEBUG(verbose,
345 printMutOnceList(&generations[g]));
346 scavenge_mut_once_list(&generations[g]);
348 for (st = generations[g].n_steps-1; st >= 0; st--) {
349 scavenge(&generations[g].steps[st]);
353 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
354 IF_PAR_DEBUG(verbose,
355 printMutableList(&generations[g]));
356 scavenge_mutable_list(&generations[g]);
358 for (st = generations[g].n_steps-1; st >= 0; st--) {
359 scavenge(&generations[g].steps[st]);
364 /* follow all the roots that the application knows about.
370 /* And don't forget to mark the TSO if we got here direct from
372 /* Not needed in a seq version?
374 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
378 /* Mark the entries in the GALA table of the parallel system */
379 markLocalGAs(major_gc);
382 /* Mark the weak pointer list, and prepare to detect dead weak
385 old_weak_ptr_list = weak_ptr_list;
386 weak_ptr_list = NULL;
387 weak_done = rtsFalse;
389 /* Mark the stable pointer table.
391 markStablePtrTable(major_gc);
395 /* ToDo: To fix the caf leak, we need to make the commented out
396 * parts of this code do something sensible - as described in
399 extern void markHugsObjects(void);
404 /* -------------------------------------------------------------------------
405 * Repeatedly scavenge all the areas we know about until there's no
406 * more scavenging to be done.
413 /* scavenge static objects */
414 if (major_gc && static_objects != END_OF_STATIC_LIST) {
418 /* When scavenging the older generations: Objects may have been
419 * evacuated from generations <= N into older generations, and we
420 * need to scavenge these objects. We're going to try to ensure that
421 * any evacuations that occur move the objects into at least the
422 * same generation as the object being scavenged, otherwise we
423 * have to create new entries on the mutable list for the older
427 /* scavenge each step in generations 0..maxgen */
431 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
432 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
433 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
436 step = &generations[gen].steps[st];
438 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
443 if (step->new_large_objects != NULL) {
444 scavenge_large(step);
451 if (flag) { goto loop; }
453 /* must be last... */
454 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
459 /* Final traversal of the weak pointer list (see comment by
460 * cleanUpWeakPtrList below).
462 cleanup_weak_ptr_list(&weak_ptr_list);
464 /* Now see which stable names are still alive.
466 gcStablePtrTable(major_gc);
468 /* revert dead CAFs and update enteredCAFs list */
471 /* Set the maximum blocks for the oldest generation, based on twice
472 * the amount of live data now, adjusted to fit the maximum heap
475 * This is an approximation, since in the worst case we'll need
476 * twice the amount of live data plus whatever space the other
479 if (RtsFlags.GcFlags.generations > 1) {
481 oldest_gen->max_blocks =
482 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
483 RtsFlags.GcFlags.minOldGenSize);
484 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
485 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
486 if (((int)oldest_gen->max_blocks -
487 (int)oldest_gen->steps[0].to_blocks) <
488 (RtsFlags.GcFlags.pcFreeHeap *
489 RtsFlags.GcFlags.maxHeapSize / 200)) {
496 /* run through all the generations/steps and tidy up
498 copied = new_blocks * BLOCK_SIZE_W;
499 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
502 generations[g].collections++; /* for stats */
505 for (s = 0; s < generations[g].n_steps; s++) {
507 step = &generations[g].steps[s];
509 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
510 /* Tidy the end of the to-space chains */
511 step->hp_bd->free = step->hp;
512 step->hp_bd->link = NULL;
513 /* stats information: how much we copied */
515 copied -= step->hp_bd->start + BLOCK_SIZE_W -
520 /* for generations we collected... */
523 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
525 /* free old memory and shift to-space into from-space for all
526 * the collected steps (except the allocation area). These
527 * freed blocks will probaby be quickly recycled.
529 if (!(g == 0 && s == 0)) {
530 freeChain(step->blocks);
531 step->blocks = step->to_space;
532 step->n_blocks = step->to_blocks;
533 step->to_space = NULL;
535 for (bd = step->blocks; bd != NULL; bd = bd->link) {
536 bd->evacuated = 0; /* now from-space */
540 /* LARGE OBJECTS. The current live large objects are chained on
541 * scavenged_large, having been moved during garbage
542 * collection from large_objects. Any objects left on
543 * large_objects list are therefore dead, so we free them here.
545 for (bd = step->large_objects; bd != NULL; bd = next) {
550 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
553 step->large_objects = step->scavenged_large_objects;
555 /* Set the maximum blocks for this generation, interpolating
556 * between the maximum size of the oldest and youngest
559 * max_blocks = oldgen_max_blocks * G
560 * ----------------------
565 generations[g].max_blocks = (oldest_gen->max_blocks * g)
566 / (RtsFlags.GcFlags.generations-1);
568 generations[g].max_blocks = oldest_gen->max_blocks;
571 /* for older generations... */
574 /* For older generations, we need to append the
575 * scavenged_large_object list (i.e. large objects that have been
576 * promoted during this GC) to the large_object list for that step.
578 for (bd = step->scavenged_large_objects; bd; bd = next) {
581 dbl_link_onto(bd, &step->large_objects);
584 /* add the new blocks we promoted during this GC */
585 step->n_blocks += step->to_blocks;
590 /* Guess the amount of live data for stats. */
593 /* Free the small objects allocated via allocate(), since this will
594 * all have been copied into G0S1 now.
596 if (small_alloc_list != NULL) {
597 freeChain(small_alloc_list);
599 small_alloc_list = NULL;
603 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
605 /* Two-space collector:
606 * Free the old to-space, and estimate the amount of live data.
608 if (RtsFlags.GcFlags.generations == 1) {
611 if (old_to_space != NULL) {
612 freeChain(old_to_space);
614 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
615 bd->evacuated = 0; /* now from-space */
618 /* For a two-space collector, we need to resize the nursery. */
620 /* set up a new nursery. Allocate a nursery size based on a
621 * function of the amount of live data (currently a factor of 2,
622 * should be configurable (ToDo)). Use the blocks from the old
623 * nursery if possible, freeing up any left over blocks.
625 * If we get near the maximum heap size, then adjust our nursery
626 * size accordingly. If the nursery is the same size as the live
627 * data (L), then we need 3L bytes. We can reduce the size of the
628 * nursery to bring the required memory down near 2L bytes.
630 * A normal 2-space collector would need 4L bytes to give the same
631 * performance we get from 3L bytes, reducing to the same
632 * performance at 2L bytes.
634 blocks = g0s0->to_blocks;
636 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
637 RtsFlags.GcFlags.maxHeapSize ) {
638 int adjusted_blocks; /* signed on purpose */
641 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
642 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
643 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
644 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
647 blocks = adjusted_blocks;
650 blocks *= RtsFlags.GcFlags.oldGenFactor;
651 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
652 blocks = RtsFlags.GcFlags.minAllocAreaSize;
655 resizeNursery(blocks);
658 /* Generational collector:
659 * If the user has given us a suggested heap size, adjust our
660 * allocation area to make best use of the memory available.
663 if (RtsFlags.GcFlags.heapSizeSuggestion) {
665 nat needed = calcNeeded(); /* approx blocks needed at next GC */
667 /* Guess how much will be live in generation 0 step 0 next time.
668 * A good approximation is the obtained by finding the
669 * percentage of g0s0 that was live at the last minor GC.
672 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
675 /* Estimate a size for the allocation area based on the
676 * information available. We might end up going slightly under
677 * or over the suggested heap size, but we should be pretty
680 * Formula: suggested - needed
681 * ----------------------------
682 * 1 + g0s0_pcnt_kept/100
684 * where 'needed' is the amount of memory needed at the next
685 * collection for collecting all steps except g0s0.
688 (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
689 (100 + (int)g0s0_pcnt_kept);
691 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
692 blocks = RtsFlags.GcFlags.minAllocAreaSize;
695 resizeNursery((nat)blocks);
699 /* mark the garbage collected CAFs as dead */
701 if (major_gc) { gcCAFs(); }
704 /* zero the scavenged static object list */
706 zero_static_object_list(scavenged_static_objects);
714 /* Reconstruct the Global Address tables used in GUM */
715 RebuildGAtables(major_gc);
718 /* start any pending finalizers */
719 scheduleFinalizers(old_weak_ptr_list);
721 /* check sanity after GC */
722 IF_DEBUG(sanity, checkSanity(N));
724 /* extra GC trace info */
725 IF_DEBUG(gc, stat_describe_gens());
728 /* symbol-table based profiling */
729 /* heapCensus(to_space); */ /* ToDo */
732 /* restore enclosing cost centre */
738 /* check for memory leaks if sanity checking is on */
739 IF_DEBUG(sanity, memInventory());
741 /* ok, GC over: tell the stats department what happened. */
742 stat_endGC(allocated, collected, live, copied, N);
745 //@node Weak Pointers, Evacuation, Garbage Collect
746 //@subsection Weak Pointers
748 /* -----------------------------------------------------------------------------
751 traverse_weak_ptr_list is called possibly many times during garbage
752 collection. It returns a flag indicating whether it did any work
753 (i.e. called evacuate on any live pointers).
755 Invariant: traverse_weak_ptr_list is called when the heap is in an
756 idempotent state. That means that there are no pending
757 evacuate/scavenge operations. This invariant helps the weak
758 pointer code decide which weak pointers are dead - if there are no
759 new live weak pointers, then all the currently unreachable ones are
762 For generational GC: we just don't try to finalize weak pointers in
763 older generations than the one we're collecting. This could
764 probably be optimised by keeping per-generation lists of weak
765 pointers, but for a few weak pointers this scheme will work.
766 -------------------------------------------------------------------------- */
767 //@cindex traverse_weak_ptr_list
770 traverse_weak_ptr_list(void)
772 StgWeak *w, **last_w, *next_w;
774 rtsBool flag = rtsFalse;
776 if (weak_done) { return rtsFalse; }
778 /* doesn't matter where we evacuate values/finalizers to, since
779 * these pointers are treated as roots (iff the keys are alive).
783 last_w = &old_weak_ptr_list;
784 for (w = old_weak_ptr_list; w; w = next_w) {
786 /* First, this weak pointer might have been evacuated. If so,
787 * remove the forwarding pointer from the weak_ptr_list.
789 if (get_itbl(w)->type == EVACUATED) {
790 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
794 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
795 * called on a live weak pointer object. Just remove it.
797 if (w->header.info == &DEAD_WEAK_info) {
798 next_w = ((StgDeadWeak *)w)->link;
803 ASSERT(get_itbl(w)->type == WEAK);
805 /* Now, check whether the key is reachable.
807 if ((new = isAlive(w->key))) {
809 /* evacuate the value and finalizer */
810 w->value = evacuate(w->value);
811 w->finalizer = evacuate(w->finalizer);
812 /* remove this weak ptr from the old_weak_ptr list */
814 /* and put it on the new weak ptr list */
816 w->link = weak_ptr_list;
819 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
829 /* If we didn't make any changes, then we can go round and kill all
830 * the dead weak pointers. The old_weak_ptr list is used as a list
831 * of pending finalizers later on.
833 if (flag == rtsFalse) {
834 cleanup_weak_ptr_list(&old_weak_ptr_list);
835 for (w = old_weak_ptr_list; w; w = w->link) {
836 w->finalizer = evacuate(w->finalizer);
844 /* -----------------------------------------------------------------------------
845 After GC, the live weak pointer list may have forwarding pointers
846 on it, because a weak pointer object was evacuated after being
847 moved to the live weak pointer list. We remove those forwarding
850 Also, we don't consider weak pointer objects to be reachable, but
851 we must nevertheless consider them to be "live" and retain them.
852 Therefore any weak pointer objects which haven't as yet been
853 evacuated need to be evacuated now.
854 -------------------------------------------------------------------------- */
856 //@cindex cleanup_weak_ptr_list
859 cleanup_weak_ptr_list ( StgWeak **list )
861 StgWeak *w, **last_w;
864 for (w = *list; w; w = w->link) {
866 if (get_itbl(w)->type == EVACUATED) {
867 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
871 if (Bdescr((P_)w)->evacuated == 0) {
872 (StgClosure *)w = evacuate((StgClosure *)w);
879 /* -----------------------------------------------------------------------------
880 isAlive determines whether the given closure is still alive (after
881 a garbage collection) or not. It returns the new address of the
882 closure if it is alive, or NULL otherwise.
883 -------------------------------------------------------------------------- */
888 isAlive(StgClosure *p)
890 const StgInfoTable *info;
896 /* ToDo: for static closures, check the static link field.
897 * Problem here is that we sometimes don't set the link field, eg.
898 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
901 #if 1 || !defined(PAR)
902 /* ignore closures in generations that we're not collecting. */
903 /* In GUM we use this routine when rebuilding GA tables; for some
904 reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
905 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
910 switch (info->type) {
915 case IND_OLDGEN: /* rely on compatible layout with StgInd */
916 case IND_OLDGEN_PERM:
917 /* follow indirections */
918 p = ((StgInd *)p)->indirectee;
923 return ((StgEvacuated *)p)->evacuee;
934 MarkRoot(StgClosure *root)
936 return evacuate(root);
940 static void addBlock(step *step)
942 bdescr *bd = allocBlock();
946 if (step->gen->no <= N) {
952 step->hp_bd->free = step->hp;
953 step->hp_bd->link = bd;
954 step->hp = bd->start;
955 step->hpLim = step->hp + BLOCK_SIZE_W;
961 //@cindex upd_evacuee
963 static __inline__ void
964 upd_evacuee(StgClosure *p, StgClosure *dest)
966 p->header.info = &EVACUATED_info;
967 ((StgEvacuated *)p)->evacuee = dest;
972 static __inline__ StgClosure *
973 copy(StgClosure *src, nat size, step *step)
977 TICK_GC_WORDS_COPIED(size);
978 /* Find out where we're going, using the handy "to" pointer in
979 * the step of the source object. If it turns out we need to
980 * evacuate to an older generation, adjust it here (see comment
983 if (step->gen->no < evac_gen) {
984 #ifdef NO_EAGER_PROMOTION
985 failed_to_evac = rtsTrue;
987 step = &generations[evac_gen].steps[0];
991 /* chain a new block onto the to-space for the destination step if
994 if (step->hp + size >= step->hpLim) {
998 for(to = step->hp, from = (P_)src; size>0; --size) {
1004 upd_evacuee(src,(StgClosure *)dest);
1005 return (StgClosure *)dest;
1008 /* Special version of copy() for when we only want to copy the info
1009 * pointer of an object, but reserve some padding after it. This is
1010 * used to optimise evacuation of BLACKHOLEs.
1015 static __inline__ StgClosure *
1016 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
1020 TICK_GC_WORDS_COPIED(size_to_copy);
1021 if (step->gen->no < evac_gen) {
1022 #ifdef NO_EAGER_PROMOTION
1023 failed_to_evac = rtsTrue;
1025 step = &generations[evac_gen].steps[0];
1029 if (step->hp + size_to_reserve >= step->hpLim) {
1033 for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1038 step->hp += size_to_reserve;
1039 upd_evacuee(src,(StgClosure *)dest);
1040 return (StgClosure *)dest;
1043 //@node Evacuation, Scavenging, Weak Pointers
1044 //@subsection Evacuation
1046 /* -----------------------------------------------------------------------------
1047 Evacuate a large object
1049 This just consists of removing the object from the (doubly-linked)
1050 large_alloc_list, and linking it on to the (singly-linked)
1051 new_large_objects list, from where it will be scavenged later.
1053 Convention: bd->evacuated is /= 0 for a large object that has been
1054 evacuated, or 0 otherwise.
1055 -------------------------------------------------------------------------- */
1057 //@cindex evacuate_large
1060 evacuate_large(StgPtr p, rtsBool mutable)
1062 bdescr *bd = Bdescr(p);
1065 /* should point to the beginning of the block */
1066 ASSERT(((W_)p & BLOCK_MASK) == 0);
1068 /* already evacuated? */
1069 if (bd->evacuated) {
1070 /* Don't forget to set the failed_to_evac flag if we didn't get
1071 * the desired destination (see comments in evacuate()).
1073 if (bd->gen->no < evac_gen) {
1074 failed_to_evac = rtsTrue;
1075 TICK_GC_FAILED_PROMOTION();
1081 /* remove from large_object list */
1083 bd->back->link = bd->link;
1084 } else { /* first object in the list */
1085 step->large_objects = bd->link;
1088 bd->link->back = bd->back;
1091 /* link it on to the evacuated large object list of the destination step
1093 step = bd->step->to;
1094 if (step->gen->no < evac_gen) {
1095 #ifdef NO_EAGER_PROMOTION
1096 failed_to_evac = rtsTrue;
1098 step = &generations[evac_gen].steps[0];
1103 bd->gen = step->gen;
1104 bd->link = step->new_large_objects;
1105 step->new_large_objects = bd;
1109 recordMutable((StgMutClosure *)p);
1113 /* -----------------------------------------------------------------------------
1114 Adding a MUT_CONS to an older generation.
1116 This is necessary from time to time when we end up with an
1117 old-to-new generation pointer in a non-mutable object. We defer
1118 the promotion until the next GC.
1119 -------------------------------------------------------------------------- */
1124 mkMutCons(StgClosure *ptr, generation *gen)
1129 step = &gen->steps[0];
1131 /* chain a new block onto the to-space for the destination step if
1134 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
1138 q = (StgMutVar *)step->hp;
1139 step->hp += sizeofW(StgMutVar);
1141 SET_HDR(q,&MUT_CONS_info,CCS_GC);
1143 recordOldToNewPtrs((StgMutClosure *)q);
1145 return (StgClosure *)q;
1148 /* -----------------------------------------------------------------------------
1151 This is called (eventually) for every live object in the system.
1153 The caller to evacuate specifies a desired generation in the
1154 evac_gen global variable. The following conditions apply to
1155 evacuating an object which resides in generation M when we're
1156 collecting up to generation N
1160 else evac to step->to
1162 if M < evac_gen evac to evac_gen, step 0
1164 if the object is already evacuated, then we check which generation
1167 if M >= evac_gen do nothing
1168 if M < evac_gen set failed_to_evac flag to indicate that we
1169 didn't manage to evacuate this object into evac_gen.
1171 -------------------------------------------------------------------------- */
1175 evacuate(StgClosure *q)
1180 const StgInfoTable *info;
1183 if (HEAP_ALLOCED(q)) {
1185 if (bd->gen->no > N) {
1186 /* Can't evacuate this object, because it's in a generation
1187 * older than the ones we're collecting. Let's hope that it's
1188 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1190 if (bd->gen->no < evac_gen) {
1192 failed_to_evac = rtsTrue;
1193 TICK_GC_FAILED_PROMOTION();
1197 step = bd->step->to;
1200 else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1203 /* make sure the info pointer is into text space */
1204 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1205 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1208 if (info->type==RBH) {
1209 info = REVERT_INFOPTR(info);
1211 belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
1212 q, info_type(q), info, info_type_by_ip(info)));
1216 switch (info -> type) {
1220 nat size = bco_sizeW((StgBCO*)q);
1222 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1223 evacuate_large((P_)q, rtsFalse);
1226 /* just copy the block */
1227 to = copy(q,size,step);
1233 ASSERT(q->header.info != &MUT_CONS_info);
1235 to = copy(q,sizeW_fromITBL(info),step);
1236 recordMutable((StgMutClosure *)to);
1243 return copy(q,sizeofW(StgHeader)+1,step);
1245 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1250 #ifdef NO_PROMOTE_THUNKS
1251 if (bd->gen->no == 0 &&
1252 bd->step->no != 0 &&
1253 bd->step->no == bd->gen->n_steps-1) {
1257 return copy(q,sizeofW(StgHeader)+2,step);
1265 return copy(q,sizeofW(StgHeader)+2,step);
1271 case IND_OLDGEN_PERM:
1277 return copy(q,sizeW_fromITBL(info),step);
1280 case SE_CAF_BLACKHOLE:
1283 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1286 to = copy(q,BLACKHOLE_sizeW(),step);
1287 recordMutable((StgMutClosure *)to);
1290 case THUNK_SELECTOR:
1292 const StgInfoTable* selectee_info;
1293 StgClosure* selectee = ((StgSelector*)q)->selectee;
1296 selectee_info = get_itbl(selectee);
1297 switch (selectee_info->type) {
1306 StgWord32 offset = info->layout.selector_offset;
1308 /* check that the size is in range */
1310 (StgWord32)(selectee_info->layout.payload.ptrs +
1311 selectee_info->layout.payload.nptrs));
1313 /* perform the selection! */
1314 q = selectee->payload[offset];
1316 /* if we're already in to-space, there's no need to continue
1317 * with the evacuation, just update the source address with
1318 * a pointer to the (evacuated) constructor field.
1320 if (HEAP_ALLOCED(q)) {
1321 bdescr *bd = Bdescr((P_)q);
1322 if (bd->evacuated) {
1323 if (bd->gen->no < evac_gen) {
1324 failed_to_evac = rtsTrue;
1325 TICK_GC_FAILED_PROMOTION();
1331 /* otherwise, carry on and evacuate this constructor field,
1332 * (but not the constructor itself)
1341 case IND_OLDGEN_PERM:
1342 selectee = stgCast(StgInd *,selectee)->indirectee;
1346 selectee = stgCast(StgCAF *,selectee)->value;
1350 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1360 case THUNK_SELECTOR:
1361 /* aargh - do recursively???? */
1364 case SE_CAF_BLACKHOLE:
1368 /* not evaluated yet */
1372 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1373 (int)(selectee_info->type));
1376 return copy(q,THUNK_SELECTOR_sizeW(),step);
1380 /* follow chains of indirections, don't evacuate them */
1381 q = ((StgInd*)q)->indirectee;
1385 if (info->srt_len > 0 && major_gc &&
1386 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1387 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1388 static_objects = (StgClosure *)q;
1393 if (info->srt_len > 0 && major_gc &&
1394 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1395 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1396 static_objects = (StgClosure *)q;
1401 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1402 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1403 static_objects = (StgClosure *)q;
1408 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1409 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1410 static_objects = (StgClosure *)q;
1414 case CONSTR_INTLIKE:
1415 case CONSTR_CHARLIKE:
1416 case CONSTR_NOCAF_STATIC:
1417 /* no need to put these on the static linked list, they don't need
1432 /* shouldn't see these */
1433 barf("evacuate: stack frame at %p\n", q);
1437 /* these are special - the payload is a copy of a chunk of stack,
1439 return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1442 /* Already evacuated, just return the forwarding address.
1443 * HOWEVER: if the requested destination generation (evac_gen) is
1444 * older than the actual generation (because the object was
1445 * already evacuated to a younger generation) then we have to
1446 * set the failed_to_evac flag to indicate that we couldn't
1447 * manage to promote the object to the desired generation.
1449 if (evac_gen > 0) { /* optimisation */
1450 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1451 if (Bdescr((P_)p)->gen->no < evac_gen) {
1452 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1453 failed_to_evac = rtsTrue;
1454 TICK_GC_FAILED_PROMOTION();
1457 return ((StgEvacuated*)q)->evacuee;
1461 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1463 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1464 evacuate_large((P_)q, rtsFalse);
1467 /* just copy the block */
1468 return copy(q,size,step);
1473 case MUT_ARR_PTRS_FROZEN:
1475 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1477 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1478 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1481 /* just copy the block */
1482 to = copy(q,size,step);
1483 if (info->type == MUT_ARR_PTRS) {
1484 recordMutable((StgMutClosure *)to);
1492 StgTSO *tso = (StgTSO *)q;
1493 nat size = tso_sizeW(tso);
1496 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1498 if (tso->whatNext == ThreadRelocated) {
1499 q = (StgClosure *)tso->link;
1503 /* Large TSOs don't get moved, so no relocation is required.
1505 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1506 evacuate_large((P_)q, rtsTrue);
1509 /* To evacuate a small TSO, we need to relocate the update frame
1513 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1515 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1517 /* relocate the stack pointers... */
1518 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1519 new_tso->sp = (StgPtr)new_tso->sp + diff;
1520 new_tso->splim = (StgPtr)new_tso->splim + diff;
1522 relocate_TSO(tso, new_tso);
1524 recordMutable((StgMutClosure *)new_tso);
1525 return (StgClosure *)new_tso;
1530 case RBH: // cf. BLACKHOLE_BQ
1532 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1533 to = copy(q,BLACKHOLE_sizeW(),step);
1534 //ToDo: derive size etc from reverted IP
1535 //to = copy(q,size,step);
1536 recordMutable((StgMutClosure *)to);
1538 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1539 q, info_type(q), to, info_type(to)));
1544 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1545 to = copy(q,sizeofW(StgBlockedFetch),step);
1547 belch("@@ evacuate: %p (%s) to %p (%s)",
1548 q, info_type(q), to, info_type(to)));
1552 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1553 to = copy(q,sizeofW(StgFetchMe),step);
1555 belch("@@ evacuate: %p (%s) to %p (%s)",
1556 q, info_type(q), to, info_type(to)));
1560 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1561 to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
1563 belch("@@ evacuate: %p (%s) to %p (%s)",
1564 q, info_type(q), to, info_type(to)));
1569 barf("evacuate: strange closure type %d", (int)(info->type));
1575 /* -----------------------------------------------------------------------------
1576 relocate_TSO is called just after a TSO has been copied from src to
1577 dest. It adjusts the update frame list for the new location.
1578 -------------------------------------------------------------------------- */
1579 //@cindex relocate_TSO
1582 relocate_TSO(StgTSO *src, StgTSO *dest)
1589 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1593 while ((P_)su < dest->stack + dest->stack_size) {
1594 switch (get_itbl(su)->type) {
1596 /* GCC actually manages to common up these three cases! */
1599 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1604 cf = (StgCatchFrame *)su;
1605 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1610 sf = (StgSeqFrame *)su;
1611 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1620 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1628 //@node Scavenging, Reverting CAFs, Evacuation
1629 //@subsection Scavenging
1631 //@cindex scavenge_srt
1634 scavenge_srt(const StgInfoTable *info)
1636 StgClosure **srt, **srt_end;
1638 /* evacuate the SRT. If srt_len is zero, then there isn't an
1639 * srt field in the info table. That's ok, because we'll
1640 * never dereference it.
1642 srt = stgCast(StgClosure **,info->srt);
1643 srt_end = srt + info->srt_len;
1644 for (; srt < srt_end; srt++) {
1645 /* Special-case to handle references to closures hiding out in DLLs, since
1646 double indirections required to get at those. The code generator knows
1647 which is which when generating the SRT, so it stores the (indirect)
1648 reference to the DLL closure in the table by first adding one to it.
1649 We check for this here, and undo the addition before evacuating it.
1651 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1652 closure that's fixed at link-time, and no extra magic is required.
1654 #ifdef ENABLE_WIN32_DLL_SUPPORT
1655 if ( stgCast(unsigned long,*srt) & 0x1 ) {
1656 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1666 /* -----------------------------------------------------------------------------
1668 -------------------------------------------------------------------------- */
1671 scavengeTSO (StgTSO *tso)
1673 /* chase the link field for any TSOs on the same queue */
1674 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1675 if ( tso->why_blocked == BlockedOnMVar
1676 || tso->why_blocked == BlockedOnBlackHole
1677 || tso->why_blocked == BlockedOnException) {
1678 tso->block_info.closure = evacuate(tso->block_info.closure);
1680 if ( tso->blocked_exceptions != NULL ) {
1681 tso->blocked_exceptions =
1682 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1684 /* scavenge this thread's stack */
1685 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1688 /* -----------------------------------------------------------------------------
1689 Scavenge a given step until there are no more objects in this step
1692 evac_gen is set by the caller to be either zero (for a step in a
1693 generation < N) or G where G is the generation of the step being
1696 We sometimes temporarily change evac_gen back to zero if we're
1697 scavenging a mutable object where early promotion isn't such a good
1699 -------------------------------------------------------------------------- */
1703 scavenge(step *step)
1706 const StgInfoTable *info;
1708 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1713 failed_to_evac = rtsFalse;
1715 /* scavenge phase - standard breadth-first scavenging of the
1719 while (bd != step->hp_bd || p < step->hp) {
1721 /* If we're at the end of this block, move on to the next block */
1722 if (bd != step->hp_bd && p == bd->free) {
1728 q = p; /* save ptr to object */
1730 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1731 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1733 info = get_itbl((StgClosure *)p);
1735 if (info->type==RBH)
1736 info = REVERT_INFOPTR(info);
1739 switch (info -> type) {
1743 StgBCO* bco = stgCast(StgBCO*,p);
1745 for (i = 0; i < bco->n_ptrs; i++) {
1746 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1748 p += bco_sizeW(bco);
1753 /* treat MVars specially, because we don't want to evacuate the
1754 * mut_link field in the middle of the closure.
1757 StgMVar *mvar = ((StgMVar *)p);
1759 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1760 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1761 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1762 p += sizeofW(StgMVar);
1763 evac_gen = saved_evac_gen;
1771 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1772 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1773 p += sizeofW(StgHeader) + 2;
1778 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1779 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1785 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1786 p += sizeofW(StgHeader) + 1;
1791 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1797 p += sizeofW(StgHeader) + 1;
1804 p += sizeofW(StgHeader) + 2;
1811 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1812 p += sizeofW(StgHeader) + 2;
1827 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1828 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1829 (StgClosure *)*p = evacuate((StgClosure *)*p);
1831 p += info->layout.payload.nptrs;
1836 if (step->gen->no != 0) {
1837 SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
1840 case IND_OLDGEN_PERM:
1841 ((StgIndOldGen *)p)->indirectee =
1842 evacuate(((StgIndOldGen *)p)->indirectee);
1843 if (failed_to_evac) {
1844 failed_to_evac = rtsFalse;
1845 recordOldToNewPtrs((StgMutClosure *)p);
1847 p += sizeofW(StgIndOldGen);
1852 StgCAF *caf = (StgCAF *)p;
1854 caf->body = evacuate(caf->body);
1855 if (failed_to_evac) {
1856 failed_to_evac = rtsFalse;
1857 recordOldToNewPtrs((StgMutClosure *)p);
1859 caf->mut_link = NULL;
1861 p += sizeofW(StgCAF);
1867 StgCAF *caf = (StgCAF *)p;
1869 caf->body = evacuate(caf->body);
1870 caf->value = evacuate(caf->value);
1871 if (failed_to_evac) {
1872 failed_to_evac = rtsFalse;
1873 recordOldToNewPtrs((StgMutClosure *)p);
1875 caf->mut_link = NULL;
1877 p += sizeofW(StgCAF);
1882 /* ignore MUT_CONSs */
1883 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1885 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1886 evac_gen = saved_evac_gen;
1888 p += sizeofW(StgMutVar);
1892 case SE_CAF_BLACKHOLE:
1895 p += BLACKHOLE_sizeW();
1900 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1901 (StgClosure *)bh->blocking_queue =
1902 evacuate((StgClosure *)bh->blocking_queue);
1903 if (failed_to_evac) {
1904 failed_to_evac = rtsFalse;
1905 recordMutable((StgMutClosure *)bh);
1907 p += BLACKHOLE_sizeW();
1911 case THUNK_SELECTOR:
1913 StgSelector *s = (StgSelector *)p;
1914 s->selectee = evacuate(s->selectee);
1915 p += THUNK_SELECTOR_sizeW();
1921 barf("scavenge:IND???\n");
1923 case CONSTR_INTLIKE:
1924 case CONSTR_CHARLIKE:
1926 case CONSTR_NOCAF_STATIC:
1930 /* Shouldn't see a static object here. */
1931 barf("scavenge: STATIC object\n");
1943 /* Shouldn't see stack frames here. */
1944 barf("scavenge: stack frame\n");
1946 case AP_UPD: /* same as PAPs */
1948 /* Treat a PAP just like a section of stack, not forgetting to
1949 * evacuate the function pointer too...
1952 StgPAP* pap = stgCast(StgPAP*,p);
1954 pap->fun = evacuate(pap->fun);
1955 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1956 p += pap_sizeW(pap);
1961 /* nothing to follow */
1962 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1966 /* follow everything */
1970 evac_gen = 0; /* repeatedly mutable */
1971 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1972 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1973 (StgClosure *)*p = evacuate((StgClosure *)*p);
1975 evac_gen = saved_evac_gen;
1979 case MUT_ARR_PTRS_FROZEN:
1980 /* follow everything */
1982 StgPtr start = p, next;
1984 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1985 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1986 (StgClosure *)*p = evacuate((StgClosure *)*p);
1988 if (failed_to_evac) {
1989 /* we can do this easier... */
1990 recordMutable((StgMutClosure *)start);
1991 failed_to_evac = rtsFalse;
1998 StgTSO *tso = (StgTSO *)p;
2001 evac_gen = saved_evac_gen;
2002 p += tso_sizeW(tso);
2007 case RBH: // cf. BLACKHOLE_BQ
2009 // nat size, ptrs, nonptrs, vhs;
2011 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2012 StgRBH *rbh = (StgRBH *)p;
2013 (StgClosure *)rbh->blocking_queue =
2014 evacuate((StgClosure *)rbh->blocking_queue);
2015 if (failed_to_evac) {
2016 failed_to_evac = rtsFalse;
2017 recordMutable((StgMutClosure *)rbh);
2020 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2021 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2022 // ToDo: use size of reverted closure here!
2023 p += BLACKHOLE_sizeW();
2029 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2030 /* follow the pointer to the node which is being demanded */
2031 (StgClosure *)bf->node =
2032 evacuate((StgClosure *)bf->node);
2033 /* follow the link to the rest of the blocking queue */
2034 (StgClosure *)bf->link =
2035 evacuate((StgClosure *)bf->link);
2036 if (failed_to_evac) {
2037 failed_to_evac = rtsFalse;
2038 recordMutable((StgMutClosure *)bf);
2041 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2042 bf, info_type((StgClosure *)bf),
2043 bf->node, info_type(bf->node)));
2044 p += sizeofW(StgBlockedFetch);
2050 belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
2051 p, info_type((StgClosure *)p)));
2052 p += sizeofW(StgFetchMe);
2053 break; // nothing to do in this case
2055 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2057 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2058 (StgClosure *)fmbq->blocking_queue =
2059 evacuate((StgClosure *)fmbq->blocking_queue);
2060 if (failed_to_evac) {
2061 failed_to_evac = rtsFalse;
2062 recordMutable((StgMutClosure *)fmbq);
2065 belch("@@ scavenge: %p (%s) exciting, isn't it",
2066 p, info_type((StgClosure *)p)));
2067 p += sizeofW(StgFetchMeBlockingQueue);
2073 barf("scavenge: unimplemented/strange closure type\n");
2079 /* If we didn't manage to promote all the objects pointed to by
2080 * the current object, then we have to designate this object as
2081 * mutable (because it contains old-to-new generation pointers).
2083 if (failed_to_evac) {
2084 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2085 failed_to_evac = rtsFalse;
2093 /* -----------------------------------------------------------------------------
2094 Scavenge one object.
2096 This is used for objects that are temporarily marked as mutable
2097 because they contain old-to-new generation pointers. Only certain
2098 objects can have this property.
2099 -------------------------------------------------------------------------- */
2100 //@cindex scavenge_one
2103 scavenge_one(StgClosure *p)
2105 const StgInfoTable *info;
2108 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2109 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2114 if (info->type==RBH)
2115 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2118 switch (info -> type) {
2121 case FUN_1_0: /* hardly worth specialising these guys */
2141 case IND_OLDGEN_PERM:
2146 end = (P_)p->payload + info->layout.payload.ptrs;
2147 for (q = (P_)p->payload; q < end; q++) {
2148 (StgClosure *)*q = evacuate((StgClosure *)*q);
2154 case SE_CAF_BLACKHOLE:
2159 case THUNK_SELECTOR:
2161 StgSelector *s = (StgSelector *)p;
2162 s->selectee = evacuate(s->selectee);
2166 case AP_UPD: /* same as PAPs */
2168 /* Treat a PAP just like a section of stack, not forgetting to
2169 * evacuate the function pointer too...
2172 StgPAP* pap = (StgPAP *)p;
2174 pap->fun = evacuate(pap->fun);
2175 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2180 /* This might happen if for instance a MUT_CONS was pointing to a
2181 * THUNK which has since been updated. The IND_OLDGEN will
2182 * be on the mutable list anyway, so we don't need to do anything
2188 barf("scavenge_one: strange object");
2191 no_luck = failed_to_evac;
2192 failed_to_evac = rtsFalse;
2197 /* -----------------------------------------------------------------------------
2198 Scavenging mutable lists.
2200 We treat the mutable list of each generation > N (i.e. all the
2201 generations older than the one being collected) as roots. We also
2202 remove non-mutable objects from the mutable list at this point.
2203 -------------------------------------------------------------------------- */
2204 //@cindex scavenge_mut_once_list
2207 scavenge_mut_once_list(generation *gen)
2209 const StgInfoTable *info;
2210 StgMutClosure *p, *next, *new_list;
2212 p = gen->mut_once_list;
2213 new_list = END_MUT_LIST;
2217 failed_to_evac = rtsFalse;
2219 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2221 /* make sure the info pointer is into text space */
2222 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2223 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2227 if (info->type==RBH)
2228 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2230 switch(info->type) {
2233 case IND_OLDGEN_PERM:
2235 /* Try to pull the indirectee into this generation, so we can
2236 * remove the indirection from the mutable list.
2238 ((StgIndOldGen *)p)->indirectee =
2239 evacuate(((StgIndOldGen *)p)->indirectee);
2242 if (RtsFlags.DebugFlags.gc)
2243 /* Debugging code to print out the size of the thing we just
2247 StgPtr start = gen->steps[0].scan;
2248 bdescr *start_bd = gen->steps[0].scan_bd;
2250 scavenge(&gen->steps[0]);
2251 if (start_bd != gen->steps[0].scan_bd) {
2252 size += (P_)BLOCK_ROUND_UP(start) - start;
2253 start_bd = start_bd->link;
2254 while (start_bd != gen->steps[0].scan_bd) {
2255 size += BLOCK_SIZE_W;
2256 start_bd = start_bd->link;
2258 size += gen->steps[0].scan -
2259 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2261 size = gen->steps[0].scan - start;
2263 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2267 /* failed_to_evac might happen if we've got more than two
2268 * generations, we're collecting only generation 0, the
2269 * indirection resides in generation 2 and the indirectee is
2272 if (failed_to_evac) {
2273 failed_to_evac = rtsFalse;
2274 p->mut_link = new_list;
2277 /* the mut_link field of an IND_STATIC is overloaded as the
2278 * static link field too (it just so happens that we don't need
2279 * both at the same time), so we need to NULL it out when
2280 * removing this object from the mutable list because the static
2281 * link fields are all assumed to be NULL before doing a major
2289 /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
2290 * it from the mutable list if possible by promoting whatever it
2293 ASSERT(p->header.info == &MUT_CONS_info);
2294 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2295 /* didn't manage to promote everything, so put the
2296 * MUT_CONS back on the list.
2298 p->mut_link = new_list;
2305 StgCAF *caf = (StgCAF *)p;
2306 caf->body = evacuate(caf->body);
2307 caf->value = evacuate(caf->value);
2308 if (failed_to_evac) {
2309 failed_to_evac = rtsFalse;
2310 p->mut_link = new_list;
2320 StgCAF *caf = (StgCAF *)p;
2321 caf->body = evacuate(caf->body);
2322 if (failed_to_evac) {
2323 failed_to_evac = rtsFalse;
2324 p->mut_link = new_list;
2333 /* shouldn't have anything else on the mutables list */
2334 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2338 gen->mut_once_list = new_list;
2341 //@cindex scavenge_mutable_list
2344 scavenge_mutable_list(generation *gen)
2346 const StgInfoTable *info;
2347 StgMutClosure *p, *next;
2349 p = gen->saved_mut_list;
2353 failed_to_evac = rtsFalse;
2355 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2357 /* make sure the info pointer is into text space */
2358 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2359 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2363 if (info->type==RBH)
2364 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2366 switch(info->type) {
2368 case MUT_ARR_PTRS_FROZEN:
2369 /* remove this guy from the mutable list, but follow the ptrs
2370 * anyway (and make sure they get promoted to this gen).
2376 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
2377 p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
2379 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2381 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2382 (StgClosure *)*q = evacuate((StgClosure *)*q);
2386 if (failed_to_evac) {
2387 failed_to_evac = rtsFalse;
2388 p->mut_link = gen->mut_list;
2395 /* follow everything */
2396 p->mut_link = gen->mut_list;
2402 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
2403 p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
2405 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2406 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2407 (StgClosure *)*q = evacuate((StgClosure *)*q);
2413 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2414 * it from the mutable list if possible by promoting whatever it
2418 belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
2419 p, ((StgMutVar *)p)->var, p->mut_link));
2421 ASSERT(p->header.info != &MUT_CONS_info);
2422 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2423 p->mut_link = gen->mut_list;
2429 StgMVar *mvar = (StgMVar *)p;
2432 belch("@@ scavenge_mut_list: scavenging MAVR %p; head: %p; tail: %p; value: %p ; next: %p",
2433 mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));
2435 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2436 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2437 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2438 p->mut_link = gen->mut_list;
2445 StgTSO *tso = (StgTSO *)p;
2449 /* Don't take this TSO off the mutable list - it might still
2450 * point to some younger objects (because we set evac_gen to 0
2453 tso->mut_link = gen->mut_list;
2454 gen->mut_list = (StgMutClosure *)tso;
2460 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2463 belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
2466 (StgClosure *)bh->blocking_queue =
2467 evacuate((StgClosure *)bh->blocking_queue);
2468 p->mut_link = gen->mut_list;
2473 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2476 case IND_OLDGEN_PERM:
2477 /* Try to pull the indirectee into this generation, so we can
2478 * remove the indirection from the mutable list.
2481 ((StgIndOldGen *)p)->indirectee =
2482 evacuate(((StgIndOldGen *)p)->indirectee);
2485 if (failed_to_evac) {
2486 failed_to_evac = rtsFalse;
2487 p->mut_link = gen->mut_once_list;
2488 gen->mut_once_list = p;
2494 // HWL: old PAR code deleted here
2497 /* shouldn't have anything else on the mutables list */
2498 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
2503 //@cindex scavenge_static
2506 scavenge_static(void)
2508 StgClosure* p = static_objects;
2509 const StgInfoTable *info;
2511 /* Always evacuate straight to the oldest generation for static
2513 evac_gen = oldest_gen->no;
2515 /* keep going until we've scavenged all the objects on the linked
2517 while (p != END_OF_STATIC_LIST) {
2521 if (info->type==RBH)
2522 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2524 /* make sure the info pointer is into text space */
2525 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2526 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2528 /* Take this object *off* the static_objects list,
2529 * and put it on the scavenged_static_objects list.
2531 static_objects = STATIC_LINK(info,p);
2532 STATIC_LINK(info,p) = scavenged_static_objects;
2533 scavenged_static_objects = p;
    switch (info->type) {

        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);

      barf("scavenge_static");

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
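
#if 0
/* Illustrative, self-contained sketch (not part of the RTS build):
 * the worklist discipline scavenge_static relies on.  We pop objects
 * off the front of `work'; scavenging an object may cons new objects
 * onto `work' (just as evacuate() adds to static_objects above), and
 * the loop keeps going until the list is drained.  Processed objects
 * move to `done' so their link fields can be zeroed afterwards.  All
 * names in this sketch are hypothetical.
 */
#include <stdio.h>

typedef struct Obj_ { struct Obj_ *link; int id; } Obj;

static Obj *work = NULL;           /* plays static_objects            */
static Obj *done = NULL;           /* plays scavenged_static_objects  */

static void push(Obj **list, Obj *o) { o->link = *list; *list = o; }

int main(void)
{
  Obj o0 = { NULL, 0 }, o1 = { NULL, 1 }, o2 = { NULL, 2 };
  Obj *p;

  push(&work, &o0);
  push(&work, &o1);

  while (work != NULL) {
    p = work;
    work = p->link;                /* take p *off* the work list...  */
    push(&done, p);                /* ...and put it on the done list */
    printf("scavenged %d\n", p->id);
    if (p->id == 1)
      push(&work, &o2);            /* scavenging may grow the list   */
  }
  return 0;
}
#endif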
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack
scavenge_stack(StgPtr p, StgPtr stack_end)

  const StgInfoTable* info;

  IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));

      /* otherwise, must be a pointer into the allocation space. */
        (StgClosure *)*p = evacuate((StgClosure *)q);

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
      bitmap = ((StgRetDyn *)p)->liveness;
      p = (P_)&((StgRetDyn *)p)->payload[0];

      /* probably a slow-entry point return address: */

                      belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                            old_p, p, old_p+1));
      p++; /* what if FHS!=1 !? -- HWL */
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
        StgUpdateFrame *frame = (StgUpdateFrame *)p;

        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);

          bdescr *bd = Bdescr((P_)frame->updatee);

          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;

          /* Don't promote blackholes */

          if (!(step->gen->no == 0 &&
                step->no == step->gen->n_steps-1)) {

            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            frame->updatee = to;

            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);

          /* will never be SE_{,CAF_}BLACKHOLE, since we
             don't push an update frame for single-entry thunks.  KSW 1999-01. */
          barf("scavenge_stack: UPDATE_FRAME updatee");
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */

        // StgPtr old_p = p; // debugging only -- HWL
        /* stack frames like these are ordinary closures and therefore may
           contain setup-specific fixed-header words (as in GranSim!);
           therefore, these cases should not use p++ but &(p->payload) -- HWL */
        // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p)));
        bitmap = info->layout.bitmap;

        p = (StgPtr)&(((StgClosure *)p)->payload);
        // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1));

        bitmap = info->layout.bitmap;

        /* this assumes that the payload starts immediately after the info-ptr */

        while (bitmap != 0) {
          if ((bitmap & 1) == 0) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);

          bitmap = bitmap >> 1;
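          /* Bitmap convention: a clear bit means the corresponding
           * stack word is a pointer and must be evacuated; a set bit
           * means a non-pointer.  E.g. bitmap 0x5 (binary 101) skips
           * words 0 and 2 and evacuates word 1.
           */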
      /* large bitmap (> 32 entries) */

        StgLargeBitmap *large_bitmap;

        large_bitmap = info->layout.large_bitmap;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);

            bitmap = bitmap >> 1;

          if (i+1 < large_bitmap->size) {

              (StgClosure *)*p = evacuate((StgClosure *)*p);
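              /* this bitmap word is exhausted, so all its remaining
               * bits are clear: every word from here up to q is a
               * pointer and gets evacuated */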
      /* and don't forget to follow the SRT */

      barf("scavenge_stack: weird activation record found on stack.\n");

/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large
//@cindex scavenge_large

scavenge_large(step *step)

  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* only certain objects can be "large"... */

      /* nothing to follow */

      /* follow everything */

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */

        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);

        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);

        StgBCO* bco = stgCast(StgBCO*,p);

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));

      scavengeTSO((StgTSO *)p);
      // HWL: old PAR code deleted here

      barf("scavenge_large: unknown/strange object");
//@cindex zero_static_object_list

zero_static_object_list(StgClosure* first_static)

  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list
zero_mutable_list( StgMutClosure *first )

  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)

  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link = stgCast(StgCAF*,0xdeadbeef);

  enteredCAFs = END_CAF_LIST;
//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)

    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {

        new = (StgCAF*)isAlive((StgClosure*)caf);

           new->link = enteredCAFs;

           SET_INFO(caf,&CAF_UNENTERED_info);
           caf->value = (StgClosure*)0xdeadbeef;
           caf->link = (StgCAF*)0xdeadbeef;
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
  const StgInfoTable *info;

      ASSERT(info->type == IND_STATIC);

      if (STATIC_LINK(info,p) == NULL) {
        IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));

        SET_INFO(p,&BLACKHOLE_info);
        p = STATIC_LINK2(info,p);

        pp = &STATIC_LINK2(info,p);

  /*  fprintf(stderr, "%d CAFs live\n", i); */
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole
threadLazyBlackHole(StgTSO *tso)

  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

    switch (get_itbl(update_frame)->type) {

      update_frame = stgCast(StgCatchFrame*,update_frame)->link;

      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&BLACKHOLE_info);

      update_frame = update_frame->link;

      update_frame = stgCast(StgSeqFrame*,update_frame)->link;

      barf("threadLazyBlackHole");
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack
threadSqueezeStack(StgTSO *tso)

  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                        /* Temporally next */
  StgUpdateFrame *prev_frame;                        /* Temporally previous */

  rtsBool prev_was_update_frame;

  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;

  bottom = &(tso->stack[tso->stack_size]);

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
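
#if 0
/* Illustrative, self-contained sketch (not part of the RTS build):
 * the in-place link reversal performed by the walk below.  Going from
 * the top frame towards the bottom, each link is flipped to point at
 * the frame above, so the chain can afterwards be walked in the
 * opposite direction.  All names in this sketch are hypothetical.
 */
#include <stdio.h>

typedef struct Frame_ { struct Frame_ *link; int id; } Frame;

static Frame *reverse_links(Frame *frame)
{
  Frame *next = NULL;              /* frame above the current one */
  Frame *below;

  while (frame != NULL) {
    below = frame->link;
    frame->link = next;            /* turn the pointer around */
    next = frame;
    frame = below;
  }
  return next;          /* bottom-most frame; links now point upwards */
}

int main(void)
{
  Frame f2 = { NULL, 2 };
  Frame f1 = { &f2,  1 };
  Frame f0 = { &f1,  0 };
  Frame *p;

  for (p = reverse_links(&f0); p != NULL; p = p->link)
    printf("frame %d\n", p->id);   /* prints 2, 1, 0 */
  return 0;
}
#endif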
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;

    if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
      printObj((StgClosure *)prev_frame);
      barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",

    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
      if (frame->updatee->header.info == &BLACKHOLE_info)

    case STOP_FRAME:  stop_frames++;

    case CATCH_FRAME: catch_frames++;

    case SEQ_FRAME: seq_frames++;

      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
      printObj((StgClosure *)prev_frame);

    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {

    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     * 1. both the previous and current frame are update frames
     * 2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
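      /* updatee_bypass is now an indirection to updatee_keep, so the
       * single update frame we keep will effectively update both
       * thunks with the same value. */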
      sp = (P_)frame - 1;        /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);
      /* No squeeze for this frame */
      sp = frame_bottom - 1;        /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_info &&
            bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          SET_INFO(bh,&BLACKHOLE_info);

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

        fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;

      (P_)prev_frame = (P_)frame + displacement;

  tso->sp += displacement;
  tso->su = prev_frame;

           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
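
#if 0
/* Illustrative, self-contained sketch (not part of the RTS build):
 * the word slide performed above.  Squeezing frees `displacement'
 * words below the live part of the stack; since stacks grow downwards
 * here, the live words are copied towards higher addresses, highest
 * word first, and sp is then bumped by the same amount.  All values
 * in this sketch are hypothetical.
 */
#include <stdio.h>

int main(void)
{
  unsigned long stack[8] = { 0, 0, 11, 22, 33, 99, 99, 99 };
  unsigned long *sp     = &stack[4];  /* highest live word to move */
  unsigned long *bottom = &stack[2];  /* lowest live word to move  */
  int displacement = 3;               /* words freed by squeezing  */
  int i;

  /* copy from the top down so no word is overwritten before it is read */
  while (sp >= bottom) {
    sp[displacement] = *sp;
    sp -= 1;
  }

  for (i = 0; i < 8; i++)
    printf("%lu ", stack[i]);         /* stack[5..7] now hold 11 22 33 */
  printf("\n");
  return 0;
}
#endif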
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

threadPaused(StgTSO *tso)

  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);        /* does black holing too */
  else
    threadLazyBlackHole(tso);
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

//@cindex printMutOnceList

printMutOnceList(generation *gen)

  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));

  fputc('\n', stderr);
//@cindex printMutableList

printMutableList(generation *gen)

  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));

  fputc('\n', stderr);
//@node Index, , Pausing a thread

//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* mkMutCons::  @cindex\s-+mkMutCons
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* revert_dead_CAFs::  @cindex\s-+revert_dead_CAFs
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list