1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.91 2000/12/11 12:36:59 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
//@menu
//* Includes::
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Garbage Collect::
//* Weak Pointers::
//* Evacuation::
//* Scavenging::
//* Reverting CAFs::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::
//@end menu
26 //@node Includes, STATIC OBJECT LIST
27 //@subsection Includes
33 #include "StoragePriv.h"
36 #include "SchedAPI.h" /* for ReverCAFs prototype */
39 #include "BlockAlloc.h"
45 #include "StablePriv.h"
47 #if defined(GRAN) || defined(PAR)
48 # include "GranSimRts.h"
49 # include "ParallelRts.h"
53 # include "ParallelDebug.h"
60 #if defined(RTS_GTK_FRONTPANEL)
61 #include "FrontPanel.h"
64 //@node STATIC OBJECT LIST, Static function declarations, Includes
65 //@subsection STATIC OBJECT LIST
67 /* STATIC OBJECT LIST.
70 * We maintain a linked list of static objects that are still live.
71 * The requirements for this list are:
73 * - we need to scan the list while adding to it, in order to
74 * scavenge all the static objects (in the same way that
75 * breadth-first scavenging works for dynamic objects).
77 * - we need to be able to tell whether an object is already on
78 * the list, to break loops.
80 * Each static object has a "static link field", which we use for
81 * linking objects on to the list. We use a stack-type list, consing
82 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
86 * A separate list is kept for objects that have been scavenged
87 * already - this is so that we can zero all the marks afterwards.
89 * An object is on the list if its static link field is non-zero; this
90 * means that we have to mark the end of the list with '1', not NULL.
92 * Extra notes for generational GC:
94 * Each generation has a static object list associated with it. When
95 * collecting generations up to N, we treat the static object lists
96 * from generations > N as roots.
98 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
101 StgClosure* static_objects; /* live static objects */
102 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
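/* Illustrative sketch (not part of the original code): how an object is
 * threaded onto the static list.  STATIC_LINK and END_OF_STATIC_LIST are
 * the macros used later in this file; the helper name push_static is
 * hypothetical.  Because a NULL link field means "not on any list", the
 * list is terminated by the non-NULL sentinel END_OF_STATIC_LIST.
 */
#if 0
static void push_static( StgClosure *c, const StgInfoTable *info )
{
    if (STATIC_LINK(info,c) == NULL) {    /* not on a list yet? */
	STATIC_LINK(info,c) = static_objects;
	static_objects = c;
    }
}
#endif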
104 /* N is the oldest generation being collected, where the generations
105 * are numbered starting at 0. A major GC (indicated by the major_gc
106 * flag) is when we're collecting all generations. We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
112 /* Youngest generation that objects should be evacuated to in
113 * evacuate(). (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
120 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
121 static rtsBool weak_done; /* all done for this pass */
/* List of all threads during GC
 */
125 static StgTSO *old_all_threads;
126 static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
131 static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
135 bdescr *old_to_space;
/* Data used for allocation area sizing.
 */
139 lnat new_blocks; /* blocks allocated during this GC */
140 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
142 //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
143 //@subsection Static function declarations
145 /* -----------------------------------------------------------------------------
146 Static function declarations
147 -------------------------------------------------------------------------- */
149 static StgClosure * evacuate ( StgClosure *q );
150 static void zero_static_object_list ( StgClosure* first_static );
151 static void zero_mutable_list ( StgMutClosure *first );
153 static rtsBool traverse_weak_ptr_list ( void );
154 static void cleanup_weak_ptr_list ( StgWeak **list );
156 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
157 static void scavenge_large ( step *step );
158 static void scavenge ( step *step );
159 static void scavenge_static ( void );
160 static void scavenge_mutable_list ( generation *g );
161 static void scavenge_mut_once_list ( generation *g );
164 static void gcCAFs ( void );
167 //@node Garbage Collect, Weak Pointers, Static function declarations
168 //@subsection Garbage Collect
170 /* -----------------------------------------------------------------------------
173 For garbage collecting generation N (and all younger generations):
175 - follow all pointers in the root set. the root set includes all
176 mutable objects in all steps in all generations.
178 - for each pointer, evacuate the object it points to into either
179 + to-space in the next higher step in that generation, if one exists,
180 + if the object's generation == N, then evacuate it to the next
        generation if one exists, or else to to-space in the current
        generation.
183 + if the object's generation < N, then evacuate it to to-space
184 in the next generation.
186 - repeatedly scavenge to-space from each step in each generation
187 being collected until no more objects can be evacuated.
189 - free from-space in each step, and set from-space = to-space.
191 -------------------------------------------------------------------------- */
192 //@cindex GarbageCollect
void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
198 lnat live, allocated, collected = 0, copied = 0;
202 CostCentreStack *prev_CCS;
205 #if defined(DEBUG) && defined(GRAN)
206 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
210 /* tell the stats department that we've started a GC */
213 /* attribute any costs to CCS_GC */
219 /* Approximate how much we allocated.
   * ToDo: only when generating stats?
   */
222 allocated = calcAllocated();
  /* Figure out which generation to collect
   */
226 if (force_major_gc) {
227 N = RtsFlags.GcFlags.generations - 1;
231 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
236 major_gc = (N == RtsFlags.GcFlags.generations-1);
239 #ifdef RTS_GTK_FRONTPANEL
240 if (RtsFlags.GcFlags.frontpanel) {
241 updateFrontPanelBeforeGC(N);
245 /* check stack sanity *before* GC (ToDo: check all threads) */
247 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
249 IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
253 static_objects = END_OF_STATIC_LIST;
254 scavenged_static_objects = END_OF_STATIC_LIST;
256 /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
260 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
265 if (RtsFlags.GcFlags.generations == 1) {
266 old_to_space = g0s0->to_space;
267 g0s0->to_space = NULL;
270 /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
278 for (g = 0; g <= N; g++) {
279 generations[g].mut_once_list = END_MUT_LIST;
280 generations[g].mut_list = END_MUT_LIST;
282 for (s = 0; s < generations[g].n_steps; s++) {
284 /* generation 0, step 0 doesn't need to-space */
285 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
293 step = &generations[g].steps[s];
294 ASSERT(step->gen->no == g);
295 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
296 bd->gen = &generations[g];
299 bd->evacuated = 1; /* it's a to-space block */
300 step->hp = bd->start;
301 step->hpLim = step->hp + BLOCK_SIZE_W;
305 step->scan = bd->start;
307 step->new_large_objects = NULL;
308 step->scavenged_large_objects = NULL;
310 /* mark the large objects as not evacuated yet */
311 for (bd = step->large_objects; bd; bd = bd->link) {
317 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
320 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
321 for (s = 0; s < generations[g].n_steps; s++) {
322 step = &generations[g].steps[s];
323 if (step->hp_bd == NULL) {
325 bd->gen = &generations[g];
328 bd->evacuated = 0; /* *not* a to-space block */
329 step->hp = bd->start;
330 step->hpLim = step->hp + BLOCK_SIZE_W;
336 /* Set the scan pointer for older generations: remember we
337 * still have to scavenge objects that have been promoted. */
338 step->scan = step->hp;
339 step->scan_bd = step->hp_bd;
340 step->to_space = NULL;
342 step->new_large_objects = NULL;
343 step->scavenged_large_objects = NULL;
347 /* -----------------------------------------------------------------------
348 * follow all the roots that we know about:
349 * - mutable lists from each generation > N
350 * we want to *scavenge* these roots, not evacuate them: they're not
351 * going to move in this GC.
352 * Also: do them in reverse generation order. This is because we
353 * often want to promote objects that are pointed to by older
354 * generations early, so we don't have to repeatedly copy them.
355 * Doing the generations in reverse order ensures that we don't end
356 * up in the situation where we want to evac an object to gen 3 and
   * it has already been evacuated to gen 2.
   */
361 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
362 generations[g].saved_mut_list = generations[g].mut_list;
363 generations[g].mut_list = END_MUT_LIST;
366 /* Do the mut-once lists first */
367 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
368 IF_PAR_DEBUG(verbose,
369 printMutOnceList(&generations[g]));
370 scavenge_mut_once_list(&generations[g]);
372 for (st = generations[g].n_steps-1; st >= 0; st--) {
373 scavenge(&generations[g].steps[st]);
377 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
378 IF_PAR_DEBUG(verbose,
379 printMutableList(&generations[g]));
380 scavenge_mutable_list(&generations[g]);
382 for (st = generations[g].n_steps-1; st >= 0; st--) {
383 scavenge(&generations[g].steps[st]);
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */
402 /* Mark the entries in the GALA table of the parallel system */
403 markLocalGAs(major_gc);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
409 old_weak_ptr_list = weak_ptr_list;
410 weak_ptr_list = NULL;
411 weak_done = rtsFalse;
413 /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
416 old_all_threads = all_threads;
417 all_threads = END_TSO_QUEUE;
418 resurrected_threads = END_TSO_QUEUE;
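  /* Note: weak pointers whose keys prove reachable migrate from
   * old_weak_ptr_list back onto weak_ptr_list inside
   * traverse_weak_ptr_list(); whatever remains on old_weak_ptr_list
   * once no further progress can be made is dead, and is handed to
   * scheduleFinalizers() at the end of this function.  The all_threads
   * list is filtered in the same way, with dead threads accumulating on
   * resurrected_threads.
   */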
  /* Mark the stable pointer table.
   */
422 markStablePtrTable(major_gc);
426 /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
430 extern void markHugsObjects(void);
435 /* -------------------------------------------------------------------------
436 * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
444 /* scavenge static objects */
445 if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
	     checkStaticObjects());
    scavenge_static();
  }
451 /* When scavenging the older generations: Objects may have been
452 * evacuated from generations <= N into older generations, and we
453 * need to scavenge these objects. We're going to try to ensure that
454 * any evacuations that occur move the objects into at least the
455 * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */
460 /* scavenge each step in generations 0..maxgen */
464 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
465 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
466 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
469 step = &generations[gen].steps[st];
471 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
476 if (step->new_large_objects != NULL) {
477 scavenge_large(step);
484 if (flag) { goto loop; }
486 /* must be last... */
487 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
492 /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
495 cleanup_weak_ptr_list(&weak_ptr_list);
  /* Now see which stable names are still alive.
   */
499 gcStablePtrTable(major_gc);
502 /* Reconstruct the Global Address tables used in GUM */
503 rebuildGAtables(major_gc);
504 IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
505 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
508 /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
516 if (RtsFlags.GcFlags.generations > 1) {
518 oldest_gen->max_blocks =
519 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
520 RtsFlags.GcFlags.minOldGenSize);
521 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
522 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
523 if (((int)oldest_gen->max_blocks -
524 (int)oldest_gen->steps[0].to_blocks) <
525 (RtsFlags.GcFlags.pcFreeHeap *
526 RtsFlags.GcFlags.maxHeapSize / 200)) {
  /* run through all the generations/steps and tidy up
   */
535 copied = new_blocks * BLOCK_SIZE_W;
536 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
539 generations[g].collections++; /* for stats */
542 for (s = 0; s < generations[g].n_steps; s++) {
544 step = &generations[g].steps[s];
546 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
547 /* Tidy the end of the to-space chains */
548 step->hp_bd->free = step->hp;
549 step->hp_bd->link = NULL;
550 /* stats information: how much we copied */
552 copied -= step->hp_bd->start + BLOCK_SIZE_W -
557 /* for generations we collected... */
560 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
562 /* free old memory and shift to-space into from-space for all
563 * the collected steps (except the allocation area). These
	 * freed blocks will probably be quickly recycled.
	 */
566 if (!(g == 0 && s == 0)) {
567 freeChain(step->blocks);
568 step->blocks = step->to_space;
569 step->n_blocks = step->to_blocks;
570 step->to_space = NULL;
572 for (bd = step->blocks; bd != NULL; bd = bd->link) {
573 bd->evacuated = 0; /* now from-space */
577 /* LARGE OBJECTS. The current live large objects are chained on
578 * scavenged_large, having been moved during garbage
579 * collection from large_objects. Any objects left on
       * large_objects list are therefore dead, so we free them here.
       */
582 for (bd = step->large_objects; bd != NULL; bd = next) {
587 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
590 step->large_objects = step->scavenged_large_objects;
592 /* Set the maximum blocks for this generation, interpolating
       * between the maximum size of the oldest and youngest
       * generations.
       *
       *           max_blocks =    oldgen_max_blocks * G
       *                           ----------------------
       *                            num_generations - 1
       *
       * where G is the generation number.
       */
602 generations[g].max_blocks = (oldest_gen->max_blocks * g)
603 / (RtsFlags.GcFlags.generations-1);
605 generations[g].max_blocks = oldest_gen->max_blocks;
608 /* for older generations... */
611 /* For older generations, we need to append the
612 * scavenged_large_object list (i.e. large objects that have been
       * promoted during this GC) to the large_object list for that step.
       */
615 for (bd = step->scavenged_large_objects; bd; bd = next) {
618 dbl_link_onto(bd, &step->large_objects);
621 /* add the new blocks we promoted during this GC */
622 step->n_blocks += step->to_blocks;
627 /* Guess the amount of live data for stats. */
630 /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
633 if (small_alloc_list != NULL) {
634 freeChain(small_alloc_list);
636 small_alloc_list = NULL;
640 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
642 /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
645 if (RtsFlags.GcFlags.generations == 1) {
648 if (old_to_space != NULL) {
649 freeChain(old_to_space);
651 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
652 bd->evacuated = 0; /* now from-space */
655 /* For a two-space collector, we need to resize the nursery. */
657 /* set up a new nursery. Allocate a nursery size based on a
658 * function of the amount of live data (currently a factor of 2,
659 * should be configurable (ToDo)). Use the blocks from the old
660 * nursery if possible, freeing up any left over blocks.
662 * If we get near the maximum heap size, then adjust our nursery
663 * size accordingly. If the nursery is the same size as the live
664 * data (L), then we need 3L bytes. We can reduce the size of the
665 * nursery to bring the required memory down near 2L bytes.
667 * A normal 2-space collector would need 4L bytes to give the same
668 * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
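    /* Worked example (illustrative numbers): with L = 1000 blocks of
     * live data and a nursery of the same size, a collection needs
     * roughly L (from-space) + L (to-space) + L (nursery) = 3L blocks;
     * shrinking the nursery as we approach maxHeapSize brings this down
     * towards 2L.
     */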
671 blocks = g0s0->to_blocks;
673 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
674 RtsFlags.GcFlags.maxHeapSize ) {
675 int adjusted_blocks; /* signed on purpose */
678 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
679 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
680 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
681 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
684 blocks = adjusted_blocks;
687 blocks *= RtsFlags.GcFlags.oldGenFactor;
688 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
689 blocks = RtsFlags.GcFlags.minAllocAreaSize;
692 resizeNursery(blocks);
695 /* Generational collector:
696 * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
700 if (RtsFlags.GcFlags.heapSizeSuggestion) {
702 nat needed = calcNeeded(); /* approx blocks needed at next GC */
704 /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
709 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
712 /* Estimate a size for the allocation area based on the
713 * information available. We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
     (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
726 (100 + (int)g0s0_pcnt_kept);
728 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
729 blocks = RtsFlags.GcFlags.minAllocAreaSize;
732 resizeNursery((nat)blocks);
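    /* Worked example (illustrative numbers): suggested = 6000 blocks,
     * needed = 1000, g0s0_pcnt_kept = 20  =>
     *   blocks = (6000 - 1000) * 100 / 120 = 4166,
     * and at the next GC we expect about 4166 (nursery) + 833 (kept
     * copy) + 1000 (other steps) ~= 6000 blocks, i.e. the suggestion.
     */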
736 /* mark the garbage collected CAFs as dead */
738 if (major_gc) { gcCAFs(); }
741 /* zero the scavenged static object list */
743 zero_static_object_list(scavenged_static_objects);
750 /* start any pending finalizers */
751 scheduleFinalizers(old_weak_ptr_list);
753 /* send exceptions to any threads which were about to die */
754 resurrectThreads(resurrected_threads);
756 /* check sanity after GC */
757 IF_DEBUG(sanity, checkSanity(N));
759 /* extra GC trace info */
760 IF_DEBUG(gc, stat_describe_gens());
763 /* symbol-table based profiling */
764 /* heapCensus(to_space); */ /* ToDo */
767 /* restore enclosing cost centre */
773 /* check for memory leaks if sanity checking is on */
774 IF_DEBUG(sanity, memInventory());
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif
782 /* ok, GC over: tell the stats department what happened. */
783 stat_endGC(allocated, collected, live, copied, N);
786 //@node Weak Pointers, Evacuation, Garbage Collect
787 //@subsection Weak Pointers
789 /* -----------------------------------------------------------------------------
792 traverse_weak_ptr_list is called possibly many times during garbage
793 collection. It returns a flag indicating whether it did any work
794 (i.e. called evacuate on any live pointers).
796 Invariant: traverse_weak_ptr_list is called when the heap is in an
797 idempotent state. That means that there are no pending
798 evacuate/scavenge operations. This invariant helps the weak
799 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
803 For generational GC: we just don't try to finalize weak pointers in
804 older generations than the one we're collecting. This could
805 probably be optimised by keeping per-generation lists of weak
806 pointers, but for a few weak pointers this scheme will work.
807 -------------------------------------------------------------------------- */
808 //@cindex traverse_weak_ptr_list
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
815 rtsBool flag = rtsFalse;
817 if (weak_done) { return rtsFalse; }
819 /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;
824 last_w = &old_weak_ptr_list;
825 for (w = old_weak_ptr_list; w; w = next_w) {
827 /* First, this weak pointer might have been evacuated. If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
830 if (get_itbl(w)->type == EVACUATED) {
831 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
835 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
838 if (w->header.info == &stg_DEAD_WEAK_info) {
839 next_w = ((StgDeadWeak *)w)->link;
844 ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
848 if ((new = isAlive(w->key))) {
850 /* evacuate the value and finalizer */
851 w->value = evacuate(w->value);
852 w->finalizer = evacuate(w->finalizer);
853 /* remove this weak ptr from the old_weak_ptr list */
855 /* and put it on the new weak ptr list */
857 w->link = weak_ptr_list;
860 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
870 /* Now deal with the all_threads list, which behaves somewhat like
871 * the weak ptr list. If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
875 StgTSO *t, *tmp, *next, **prev;
877 prev = &old_all_threads;
878 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
      /* Threads which have finished or died get dropped from
       * the list.
       */
883 switch (t->what_next) {
884 case ThreadRelocated:
890 next = t->global_link;
896 /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
899 (StgClosure *)tmp = isAlive((StgClosure *)t);
901 next = tmp->global_link;
902 tmp->global_link = all_threads;
906 prev = &(t->global_link);
907 next = t->global_link;
912 /* If we didn't make any changes, then we can go round and kill all
913 * the dead weak pointers. The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
916 if (flag == rtsFalse) {
917 cleanup_weak_ptr_list(&old_weak_ptr_list);
918 for (w = old_weak_ptr_list; w; w = w->link) {
919 w->finalizer = evacuate(w->finalizer);
  /* And resurrect any threads which were about to become garbage.
   */
925 StgTSO *t, *tmp, *next;
926 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
927 next = t->global_link;
928 (StgClosure *)tmp = evacuate((StgClosure *)t);
929 tmp->global_link = resurrected_threads;
930 resurrected_threads = tmp;
940 /* -----------------------------------------------------------------------------
941 After GC, the live weak pointer list may have forwarding pointers
942 on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.
946 Also, we don't consider weak pointer objects to be reachable, but
947 we must nevertheless consider them to be "live" and retain them.
948 Therefore any weak pointer objects which haven't as yet been
949 evacuated need to be evacuated now.
950 -------------------------------------------------------------------------- */
952 //@cindex cleanup_weak_ptr_list
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
957 StgWeak *w, **last_w;
960 for (w = *list; w; w = w->link) {
962 if (get_itbl(w)->type == EVACUATED) {
963 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
967 if (Bdescr((P_)w)->evacuated == 0) {
968 (StgClosure *)w = evacuate((StgClosure *)w);
975 /* -----------------------------------------------------------------------------
976 isAlive determines whether the given closure is still alive (after
977 a garbage collection) or not. It returns the new address of the
978 closure if it is alive, or NULL otherwise.
979 -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
986 const StgInfoTable *info;
993 /* ToDo: for static closures, check the static link field.
994 * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */
998 /* ignore closures in generations that we're not collecting. */
999 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
1003 switch (info->type) {
1008 case IND_OLDGEN: /* rely on compatible layout with StgInd */
1009 case IND_OLDGEN_PERM:
1010 /* follow indirections */
1011 p = ((StgInd *)p)->indirectee;
1016 return ((StgEvacuated *)p)->evacuee;
1019 size = arr_words_sizeW((StgArrWords *)p);
1023 case MUT_ARR_PTRS_FROZEN:
1024 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
1028 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1029 p = (StgClosure *)((StgTSO *)p)->link;
1033 size = tso_sizeW((StgTSO *)p);
1035 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
1036 && Bdescr((P_)p)->evacuated)
StgClosure *
MarkRoot(StgClosure *root)
{
1052 # if 0 && defined(PAR) && defined(DEBUG)
1053 StgClosure *foo = evacuate(root);
1054 // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
1055 ASSERT(isAlive(foo)); // must be in to-space
1058 return evacuate(root);
1063 static void addBlock(step *step)
1065 bdescr *bd = allocBlock();
1066 bd->gen = step->gen;
1069 if (step->gen->no <= N) {
1075 step->hp_bd->free = step->hp;
1076 step->hp_bd->link = bd;
1077 step->hp = bd->start;
1078 step->hpLim = step->hp + BLOCK_SIZE_W;
1084 //@cindex upd_evacuee
1086 static __inline__ void
1087 upd_evacuee(StgClosure *p, StgClosure *dest)
1089 p->header.info = &stg_EVACUATED_info;
1090 ((StgEvacuated *)p)->evacuee = dest;
1095 static __inline__ StgClosure *
1096 copy(StgClosure *src, nat size, step *step)
1100 TICK_GC_WORDS_COPIED(size);
1101 /* Find out where we're going, using the handy "to" pointer in
1102 * the step of the source object. If it turns out we need to
1103 * evacuate to an older generation, adjust it here (see comment
1106 if (step->gen->no < evac_gen) {
1107 #ifdef NO_EAGER_PROMOTION
1108 failed_to_evac = rtsTrue;
1110 step = &generations[evac_gen].steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }
1121 for(to = step->hp, from = (P_)src; size>0; --size) {
1127 upd_evacuee(src,(StgClosure *)dest);
1128 return (StgClosure *)dest;
1131 /* Special version of copy() for when we only want to copy the info
1132 * pointer of an object, but reserve some padding after it. This is
 * used to optimise evacuation of BLACKHOLEs.
 */
1138 static __inline__ StgClosure *
1139 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
1143 TICK_GC_WORDS_COPIED(size_to_copy);
1144 if (step->gen->no < evac_gen) {
1145 #ifdef NO_EAGER_PROMOTION
1146 failed_to_evac = rtsTrue;
1148 step = &generations[evac_gen].steps[0];
1152 if (step->hp + size_to_reserve >= step->hpLim) {
1156 for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1161 step->hp += size_to_reserve;
1162 upd_evacuee(src,(StgClosure *)dest);
1163 return (StgClosure *)dest;
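/* Usage note: evacuate() below uses
 *   copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), step)
 * for CAF_BLACKHOLEs and friends - only the header is copied and the
 * rest of the black hole is left as uninitialised padding.
 */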
1166 //@node Evacuation, Scavenging, Weak Pointers
1167 //@subsection Evacuation
1169 /* -----------------------------------------------------------------------------
1170 Evacuate a large object
   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is non-zero for a large object that has been
   evacuated, and zero otherwise.
1178 -------------------------------------------------------------------------- */
1180 //@cindex evacuate_large
static void
evacuate_large(StgPtr p, rtsBool mutable)
{
1185 bdescr *bd = Bdescr(p);
1188 /* should point to the beginning of the block */
1189 ASSERT(((W_)p & BLOCK_MASK) == 0);
1191 /* already evacuated? */
1192 if (bd->evacuated) {
1193 /* Don't forget to set the failed_to_evac flag if we didn't get
1194 * the desired destination (see comments in evacuate()).
1196 if (bd->gen->no < evac_gen) {
1197 failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
1204 /* remove from large_object list */
1206 bd->back->link = bd->link;
1207 } else { /* first object in the list */
1208 step->large_objects = bd->link;
1211 bd->link->back = bd->back;
  /* link it on to the evacuated large object list of the destination step
   */
1216 step = bd->step->to;
1217 if (step->gen->no < evac_gen) {
1218 #ifdef NO_EAGER_PROMOTION
1219 failed_to_evac = rtsTrue;
1221 step = &generations[evac_gen].steps[0];
1226 bd->gen = step->gen;
1227 bd->link = step->new_large_objects;
1228 step->new_large_objects = bd;
1232 recordMutable((StgMutClosure *)p);
1236 /* -----------------------------------------------------------------------------
1237 Adding a MUT_CONS to an older generation.
1239 This is necessary from time to time when we end up with an
1240 old-to-new generation pointer in a non-mutable object. We defer
1241 the promotion until the next GC.
1242 -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
1252 step = &gen->steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }
1261 q = (StgMutVar *)step->hp;
1262 step->hp += sizeofW(StgMutVar);
1264 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1266 recordOldToNewPtrs((StgMutClosure *)q);
1268 return (StgClosure *)q;
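/* Usage note: scavenge() below calls mkMutCons() when failed_to_evac is
 * set, i.e. when an object could not be promoted as far as evac_gen and
 * therefore still contains old-to-new pointers that must be revisited
 * at the next GC.
 */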
1271 /* -----------------------------------------------------------------------------
1274 This is called (eventually) for every live object in the system.
1276 The caller to evacuate specifies a desired generation in the
1277 evac_gen global variable. The following conditions apply to
1278 evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     then the object is not collected
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0
   if the object is already evacuated, then we check which generation
   it now resides in.
1290 if M >= evac_gen do nothing
1291 if M < evac_gen set failed_to_evac flag to indicate that we
1292 didn't manage to evacuate this object into evac_gen.
1294 -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
1303 const StgInfoTable *info;
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
1308 if (bd->gen->no > N) {
1309 /* Can't evacuate this object, because it's in a generation
1310 * older than the ones we're collecting. Let's hope that it's
     * in evac_gen or older, or we will have to make an IND_OLDGEN object.
     */
1313 if (bd->gen->no < evac_gen) {
1315 failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
1320 step = bd->step->to;
1323 else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1326 /* make sure the info pointer is into text space */
1327 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1328 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1331 if (info->type==RBH) {
1332 info = REVERT_INFOPTR(info);
1334 belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
1335 q, info_type(q), info, info_type_by_ip(info)));
1339 switch (info -> type) {
1342 ASSERT(q->header.info != &stg_MUT_CONS_info);
1344 to = copy(q,sizeW_fromITBL(info),step);
1345 recordMutable((StgMutClosure *)to);
1350 StgWord w = (StgWord)q->payload[0];
1351 if (q->header.info == Czh_con_info &&
1352 /* unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && */
1353 (StgChar)w <= MAX_CHARLIKE) {
1354 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1356 if (q->header.info == Izh_con_info &&
1357 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1358 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1360 /* else, fall through ... */
1366 return copy(q,sizeofW(StgHeader)+1,step);
1368 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1373 #ifdef NO_PROMOTE_THUNKS
1374 if (bd->gen->no == 0 &&
1375 bd->step->no != 0 &&
1376 bd->step->no == bd->gen->n_steps-1) {
1380 return copy(q,sizeofW(StgHeader)+2,step);
1388 return copy(q,sizeofW(StgHeader)+2,step);
1394 case IND_OLDGEN_PERM:
1401 return copy(q,sizeW_fromITBL(info),step);
1404 case SE_CAF_BLACKHOLE:
1407 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1410 to = copy(q,BLACKHOLE_sizeW(),step);
1411 recordMutable((StgMutClosure *)to);
1414 case THUNK_SELECTOR:
1416 const StgInfoTable* selectee_info;
1417 StgClosure* selectee = ((StgSelector*)q)->selectee;
1420 selectee_info = get_itbl(selectee);
1421 switch (selectee_info->type) {
1430 StgWord32 offset = info->layout.selector_offset;
1432 /* check that the size is in range */
1434 (StgWord32)(selectee_info->layout.payload.ptrs +
1435 selectee_info->layout.payload.nptrs));
1437 /* perform the selection! */
1438 q = selectee->payload[offset];
1440 /* if we're already in to-space, there's no need to continue
1441 * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
1444 if (HEAP_ALLOCED(q)) {
1445 bdescr *bd = Bdescr((P_)q);
1446 if (bd->evacuated) {
1447 if (bd->gen->no < evac_gen) {
1448 failed_to_evac = rtsTrue;
1449 TICK_GC_FAILED_PROMOTION();
1455 /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
1465 case IND_OLDGEN_PERM:
1466 selectee = ((StgInd *)selectee)->indirectee;
1470 selectee = ((StgCAF *)selectee)->value;
1474 selectee = ((StgEvacuated *)selectee)->evacuee;
1485 case THUNK_SELECTOR:
1486 /* aargh - do recursively???? */
1489 case SE_CAF_BLACKHOLE:
1493 /* not evaluated yet */
1497 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1498 (int)(selectee_info->type));
1501 return copy(q,THUNK_SELECTOR_sizeW(),step);
1505 /* follow chains of indirections, don't evacuate them */
1506 q = ((StgInd*)q)->indirectee;
1510 if (info->srt_len > 0 && major_gc &&
1511 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1512 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1513 static_objects = (StgClosure *)q;
1518 if (info->srt_len > 0 && major_gc &&
1519 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1520 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1521 static_objects = (StgClosure *)q;
1526 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1527 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1528 static_objects = (StgClosure *)q;
1533 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1534 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1535 static_objects = (StgClosure *)q;
1539 case CONSTR_INTLIKE:
1540 case CONSTR_CHARLIKE:
1541 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
1557 /* shouldn't see these */
1558 barf("evacuate: stack frame at %p\n", q);
1562 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1563 * of stack, tagging and all.
1565 * They can be larger than a block in size. Both are only
1566 * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
1570 nat size = pap_sizeW((StgPAP*)q);
1571 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1572 evacuate_large((P_)q, rtsFalse);
1575 return copy(q,size,step);
1580 /* Already evacuated, just return the forwarding address.
1581 * HOWEVER: if the requested destination generation (evac_gen) is
1582 * older than the actual generation (because the object was
1583 * already evacuated to a younger generation) then we have to
1584 * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
1587 if (evac_gen > 0) { /* optimisation */
1588 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1589 if (Bdescr((P_)p)->gen->no < evac_gen) {
1590 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1591 failed_to_evac = rtsTrue;
1592 TICK_GC_FAILED_PROMOTION();
1595 return ((StgEvacuated*)q)->evacuee;
1599 nat size = arr_words_sizeW((StgArrWords *)q);
1601 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1602 evacuate_large((P_)q, rtsFalse);
1605 /* just copy the block */
1606 return copy(q,size,step);
1611 case MUT_ARR_PTRS_FROZEN:
1613 nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
1615 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1616 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1619 /* just copy the block */
1620 to = copy(q,size,step);
1621 if (info->type == MUT_ARR_PTRS) {
1622 recordMutable((StgMutClosure *)to);
1630 StgTSO *tso = (StgTSO *)q;
1631 nat size = tso_sizeW(tso);
      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
1636 if (tso->what_next == ThreadRelocated) {
1637 q = (StgClosure *)tso->link;
1641 /* Large TSOs don't get moved, so no relocation is required.
1643 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1644 evacuate_large((P_)q, rtsTrue);
      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
1651 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1653 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1655 /* relocate the stack pointers... */
1656 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1657 new_tso->sp = (StgPtr)new_tso->sp + diff;
1659 relocate_TSO(tso, new_tso);
1661 recordMutable((StgMutClosure *)new_tso);
1662 return (StgClosure *)new_tso;
1667 case RBH: // cf. BLACKHOLE_BQ
1669 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1670 to = copy(q,BLACKHOLE_sizeW(),step);
1671 //ToDo: derive size etc from reverted IP
1672 //to = copy(q,size,step);
1673 recordMutable((StgMutClosure *)to);
1675 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1676 q, info_type(q), to, info_type(to)));
1681 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1682 to = copy(q,sizeofW(StgBlockedFetch),step);
1684 belch("@@ evacuate: %p (%s) to %p (%s)",
1685 q, info_type(q), to, info_type(to)));
1689 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1690 to = copy(q,sizeofW(StgFetchMe),step);
1692 belch("@@ evacuate: %p (%s) to %p (%s)",
1693 q, info_type(q), to, info_type(to)));
1697 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1698 to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
1700 belch("@@ evacuate: %p (%s) to %p (%s)",
1701 q, info_type(q), to, info_type(to)));
1706 barf("evacuate: strange closure type %d", (int)(info->type));
1712 /* -----------------------------------------------------------------------------
1713 relocate_TSO is called just after a TSO has been copied from src to
1714 dest. It adjusts the update frame list for the new location.
1715 -------------------------------------------------------------------------- */
1716 //@cindex relocate_TSO
static StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
1726 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1730 while ((P_)su < dest->stack + dest->stack_size) {
1731 switch (get_itbl(su)->type) {
1733 /* GCC actually manages to common up these three cases! */
1736 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1741 cf = (StgCatchFrame *)su;
1742 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1747 sf = (StgSeqFrame *)su;
1748 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1757 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1765 //@node Scavenging, Reverting CAFs, Evacuation
1766 //@subsection Scavenging
1768 //@cindex scavenge_srt
static void
scavenge_srt(const StgInfoTable *info)
{
1773 StgClosure **srt, **srt_end;
1775 /* evacuate the SRT. If srt_len is zero, then there isn't an
1776 * srt field in the info table. That's ok, because we'll
   * never dereference it.
   */
1779 srt = (StgClosure **)(info->srt);
1780 srt_end = srt + info->srt_len;
1781 for (; srt < srt_end; srt++) {
1782 /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator knows
1784 which is which when generating the SRT, so it stores the (indirect)
1785 reference to the DLL closure in the table by first adding one to it.
1786 We check for this here, and undo the addition before evacuating it.
1788 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
1791 #ifdef ENABLE_WIN32_DLL_SUPPORT
1792 if ( (unsigned long)(*srt) & 0x1 ) {
1793 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
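/* Illustrative sketch (not part of the original code) of the untagging
 * performed on the line above.  Bit 0 marks an SRT entry as an extra
 * indirection through a DLL import table; clearing it recovers the
 * pointer to dereference.  srt_entry_closure is a hypothetical name.
 */
#if 0
static StgClosure *srt_entry_closure( StgClosure *entry )
{
    if ((unsigned long)entry & 0x1) {       /* tagged: DLL import */
	return *(StgClosure **)((unsigned long)entry & ~0x1UL);
    }
    return entry;                           /* direct reference */
}
#endif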
1803 /* -----------------------------------------------------------------------------
1805 -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
1810 /* chase the link field for any TSOs on the same queue */
1811 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1812 if ( tso->why_blocked == BlockedOnMVar
1813 || tso->why_blocked == BlockedOnBlackHole
1814 || tso->why_blocked == BlockedOnException
1816 || tso->why_blocked == BlockedOnGA
1817 || tso->why_blocked == BlockedOnGA_NoSend
1820 tso->block_info.closure = evacuate(tso->block_info.closure);
1822 if ( tso->blocked_exceptions != NULL ) {
1823 tso->blocked_exceptions =
1824 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1826 /* scavenge this thread's stack */
1827 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1830 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.
1834 evac_gen is set by the caller to be either zero (for a step in a
1835 generation < N) or G where G is the generation of the step being
1838 We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
1841 -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
1848 const StgInfoTable *info;
1850 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1855 failed_to_evac = rtsFalse;
  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
1861 while (bd != step->hp_bd || p < step->hp) {
1863 /* If we're at the end of this block, move on to the next block */
1864 if (bd != step->hp_bd && p == bd->free) {
1870 q = p; /* save ptr to object */
1872 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1873 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1875 info = get_itbl((StgClosure *)p);
1877 if (info->type==RBH)
1878 info = REVERT_INFOPTR(info);
1881 switch (info -> type) {
1884 /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
1890 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1891 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1892 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1893 p += sizeofW(StgMVar);
1894 evac_gen = saved_evac_gen;
1902 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1903 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1904 p += sizeofW(StgHeader) + 2;
1909 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1910 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1916 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1917 p += sizeofW(StgHeader) + 1;
1922 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1928 p += sizeofW(StgHeader) + 1;
1935 p += sizeofW(StgHeader) + 2;
1942 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1943 p += sizeofW(StgHeader) + 2;
1959 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1960 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1961 (StgClosure *)*p = evacuate((StgClosure *)*p);
1963 p += info->layout.payload.nptrs;
1968 if (step->gen->no != 0) {
1969 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
1972 case IND_OLDGEN_PERM:
1973 ((StgIndOldGen *)p)->indirectee =
1974 evacuate(((StgIndOldGen *)p)->indirectee);
1975 if (failed_to_evac) {
1976 failed_to_evac = rtsFalse;
1977 recordOldToNewPtrs((StgMutClosure *)p);
1979 p += sizeofW(StgIndOldGen);
1984 StgCAF *caf = (StgCAF *)p;
1986 caf->body = evacuate(caf->body);
1987 if (failed_to_evac) {
1988 failed_to_evac = rtsFalse;
1989 recordOldToNewPtrs((StgMutClosure *)p);
1991 caf->mut_link = NULL;
1993 p += sizeofW(StgCAF);
1999 StgCAF *caf = (StgCAF *)p;
2001 caf->body = evacuate(caf->body);
2002 caf->value = evacuate(caf->value);
2003 if (failed_to_evac) {
2004 failed_to_evac = rtsFalse;
2005 recordOldToNewPtrs((StgMutClosure *)p);
2007 caf->mut_link = NULL;
2009 p += sizeofW(StgCAF);
2014 /* ignore MUT_CONSs */
2015 if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
2017 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2018 evac_gen = saved_evac_gen;
2020 p += sizeofW(StgMutVar);
2024 case SE_CAF_BLACKHOLE:
2027 p += BLACKHOLE_sizeW();
2032 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2033 (StgClosure *)bh->blocking_queue =
2034 evacuate((StgClosure *)bh->blocking_queue);
2035 if (failed_to_evac) {
2036 failed_to_evac = rtsFalse;
2037 recordMutable((StgMutClosure *)bh);
2039 p += BLACKHOLE_sizeW();
2043 case THUNK_SELECTOR:
2045 StgSelector *s = (StgSelector *)p;
2046 s->selectee = evacuate(s->selectee);
2047 p += THUNK_SELECTOR_sizeW();
2053 barf("scavenge:IND???\n");
2055 case CONSTR_INTLIKE:
2056 case CONSTR_CHARLIKE:
2058 case CONSTR_NOCAF_STATIC:
2062 /* Shouldn't see a static object here. */
2063 barf("scavenge: STATIC object\n");
2075 /* Shouldn't see stack frames here. */
2076 barf("scavenge: stack frame\n");
2078 case AP_UPD: /* same as PAPs */
2080 /* Treat a PAP just like a section of stack, not forgetting to
2081 * evacuate the function pointer too...
2084 StgPAP* pap = (StgPAP *)p;
2086 pap->fun = evacuate(pap->fun);
2087 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2088 p += pap_sizeW(pap);
2093 /* nothing to follow */
2094 p += arr_words_sizeW((StgArrWords *)p);
2098 /* follow everything */
2102 evac_gen = 0; /* repeatedly mutable */
2103 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2104 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2105 (StgClosure *)*p = evacuate((StgClosure *)*p);
2107 evac_gen = saved_evac_gen;
2111 case MUT_ARR_PTRS_FROZEN:
2112 /* follow everything */
2114 StgPtr start = p, next;
2116 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2117 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2118 (StgClosure *)*p = evacuate((StgClosure *)*p);
2120 if (failed_to_evac) {
2121 /* we can do this easier... */
2122 recordMutable((StgMutClosure *)start);
2123 failed_to_evac = rtsFalse;
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
2134 p += tso_sizeW(tso);
2139 case RBH: // cf. BLACKHOLE_BQ
2141 // nat size, ptrs, nonptrs, vhs;
2143 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2144 StgRBH *rbh = (StgRBH *)p;
2145 (StgClosure *)rbh->blocking_queue =
2146 evacuate((StgClosure *)rbh->blocking_queue);
2147 if (failed_to_evac) {
2148 failed_to_evac = rtsFalse;
2149 recordMutable((StgMutClosure *)rbh);
2152 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2153 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2154 // ToDo: use size of reverted closure here!
2155 p += BLACKHOLE_sizeW();
2161 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2162 /* follow the pointer to the node which is being demanded */
2163 (StgClosure *)bf->node =
2164 evacuate((StgClosure *)bf->node);
2165 /* follow the link to the rest of the blocking queue */
2166 (StgClosure *)bf->link =
2167 evacuate((StgClosure *)bf->link);
2168 if (failed_to_evac) {
2169 failed_to_evac = rtsFalse;
2170 recordMutable((StgMutClosure *)bf);
2173 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2174 bf, info_type((StgClosure *)bf),
2175 bf->node, info_type(bf->node)));
2176 p += sizeofW(StgBlockedFetch);
2182 belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
2183 p, info_type((StgClosure *)p)));
2184 p += sizeofW(StgFetchMe);
2185 break; // nothing to do in this case
2187 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2189 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2190 (StgClosure *)fmbq->blocking_queue =
2191 evacuate((StgClosure *)fmbq->blocking_queue);
2192 if (failed_to_evac) {
2193 failed_to_evac = rtsFalse;
2194 recordMutable((StgMutClosure *)fmbq);
2197 belch("@@ scavenge: %p (%s) exciting, isn't it",
2198 p, info_type((StgClosure *)p)));
2199 p += sizeofW(StgFetchMeBlockingQueue);
2205 barf("scavenge: unimplemented/strange closure type %d @ %p",
2209 barf("scavenge: unimplemented/strange closure type %d @ %p",
2213 /* If we didn't manage to promote all the objects pointed to by
2214 * the current object, then we have to designate this object as
2215 * mutable (because it contains old-to-new generation pointers).
2217 if (failed_to_evac) {
2218 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2219 failed_to_evac = rtsFalse;
2227 /* -----------------------------------------------------------------------------
2228 Scavenge one object.
2230 This is used for objects that are temporarily marked as mutable
2231 because they contain old-to-new generation pointers. Only certain
2232 objects can have this property.
2233 -------------------------------------------------------------------------- */
2234 //@cindex scavenge_one
static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;
2242 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2243 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2248 if (info->type==RBH)
2249 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2252 switch (info -> type) {
2255 case FUN_1_0: /* hardly worth specialising these guys */
2275 case IND_OLDGEN_PERM:
2280 end = (P_)p->payload + info->layout.payload.ptrs;
2281 for (q = (P_)p->payload; q < end; q++) {
2282 (StgClosure *)*q = evacuate((StgClosure *)*q);
2288 case SE_CAF_BLACKHOLE:
2293 case THUNK_SELECTOR:
2295 StgSelector *s = (StgSelector *)p;
2296 s->selectee = evacuate(s->selectee);
2300 case AP_UPD: /* same as PAPs */
2302 /* Treat a PAP just like a section of stack, not forgetting to
2303 * evacuate the function pointer too...
2306 StgPAP* pap = (StgPAP *)p;
2308 pap->fun = evacuate(pap->fun);
2309 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2314 /* This might happen if for instance a MUT_CONS was pointing to a
2315 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
2322 barf("scavenge_one: strange object %d", (int)(info->type));
2325 no_luck = failed_to_evac;
2326 failed_to_evac = rtsFalse;
2331 /* -----------------------------------------------------------------------------
2332 Scavenging mutable lists.
2334 We treat the mutable list of each generation > N (i.e. all the
2335 generations older than the one being collected) as roots. We also
2336 remove non-mutable objects from the mutable list at this point.
2337 -------------------------------------------------------------------------- */
2338 //@cindex scavenge_mut_once_list
static void
scavenge_mut_once_list(generation *gen)
{
2343 const StgInfoTable *info;
2344 StgMutClosure *p, *next, *new_list;
2346 p = gen->mut_once_list;
2347 new_list = END_MUT_LIST;
2351 failed_to_evac = rtsFalse;
2353 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2355 /* make sure the info pointer is into text space */
2356 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2357 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2361 if (info->type==RBH)
2362 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2364 switch(info->type) {
2367 case IND_OLDGEN_PERM:
2369 /* Try to pull the indirectee into this generation, so we can
2370 * remove the indirection from the mutable list.
2372 ((StgIndOldGen *)p)->indirectee =
2373 evacuate(((StgIndOldGen *)p)->indirectee);
2376 if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
2381 StgPtr start = gen->steps[0].scan;
2382 bdescr *start_bd = gen->steps[0].scan_bd;
2384 scavenge(&gen->steps[0]);
2385 if (start_bd != gen->steps[0].scan_bd) {
2386 size += (P_)BLOCK_ROUND_UP(start) - start;
2387 start_bd = start_bd->link;
2388 while (start_bd != gen->steps[0].scan_bd) {
2389 size += BLOCK_SIZE_W;
2390 start_bd = start_bd->link;
2392 size += gen->steps[0].scan -
2393 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2395 size = gen->steps[0].scan - start;
2397 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2401 /* failed_to_evac might happen if we've got more than two
2402 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 0 or 1.
       */
2406 if (failed_to_evac) {
2407 failed_to_evac = rtsFalse;
2408 p->mut_link = new_list;
2411 /* the mut_link field of an IND_STATIC is overloaded as the
2412 * static link field too (it just so happens that we don't need
2413 * both at the same time), so we need to NULL it out when
2414 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2427 ASSERT(p->header.info == &stg_MUT_CONS_info);
2428 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2429 /* didn't manage to promote everything, so put the
2430 * MUT_CONS back on the list.
2432 p->mut_link = new_list;
2439 StgCAF *caf = (StgCAF *)p;
2440 caf->body = evacuate(caf->body);
2441 caf->value = evacuate(caf->value);
2442 if (failed_to_evac) {
2443 failed_to_evac = rtsFalse;
2444 p->mut_link = new_list;
2454 StgCAF *caf = (StgCAF *)p;
2455 caf->body = evacuate(caf->body);
2456 if (failed_to_evac) {
2457 failed_to_evac = rtsFalse;
2458 p->mut_link = new_list;
2467 /* shouldn't have anything else on the mutables list */
2468 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2472 gen->mut_once_list = new_list;
2475 //@cindex scavenge_mutable_list
static void
scavenge_mutable_list(generation *gen)
{
2480 const StgInfoTable *info;
2481 StgMutClosure *p, *next;
2483 p = gen->saved_mut_list;
2487 failed_to_evac = rtsFalse;
2489 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2491 /* make sure the info pointer is into text space */
2492 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2493 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2497 if (info->type==RBH)
2498 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2500 switch(info->type) {
2502 case MUT_ARR_PTRS_FROZEN:
2503 /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
2509 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2511 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2512 (StgClosure *)*q = evacuate((StgClosure *)*q);
2516 if (failed_to_evac) {
2517 failed_to_evac = rtsFalse;
2518 p->mut_link = gen->mut_list;
2525 /* follow everything */
2526 p->mut_link = gen->mut_list;
2531 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2532 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2533 (StgClosure *)*q = evacuate((StgClosure *)*q);
2539 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2543 ASSERT(p->header.info != &stg_MUT_CONS_info);
2544 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2545 p->mut_link = gen->mut_list;
    case MVAR:
      {
	StgMVar *mvar = (StgMVar *)p;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
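
/* A compiled-out sketch (not part of the collector) of the list
 * discipline used by scavenge_static() above: each object is unlinked
 * from one stack-like list and pushed onto the other *before* being
 * scavenged, so that objects evacuated during scavenging can prepend
 * themselves to static_objects without being missed or visited twice.
 * The types and names here are simplified stand-ins for the real
 * STATIC_LINK machinery.
 */
#if 0
typedef struct Obj_ { struct Obj_ *link; } Obj;

static Obj *todo_list;		/* plays the role of static_objects           */
static Obj *done_list;		/* plays the role of scavenged_static_objects */

static void process_all( void (*scav)(Obj *) )
{
    Obj *p;
    while ((p = todo_list) != NULL) {
	todo_list = p->link;	/* pop from the pending list...      */
	p->link = done_list;	/* ...and push onto the done list... */
	done_list = p;
	scav(p);		/* ...may push new work on todo_list */
    }
}
#endif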
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(StgPtr *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgPtr q;
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large
static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
#ifdef INTERPRETER
   StgInt i;

   /* Deal with CAFs created by compiled code. */
   for (i = 0; i < usedECafTable; i++) {
      SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
      ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
   }

   /* Deal with CAFs created by the interpreter. */
   while (ecafList != END_ECAF_LIST) {
      StgCAF* caf  = ecafList;
      ecafList     = caf->link;
      ASSERT(get_itbl(caf)->type == CAF_ENTERED);
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value   = (StgClosure *)0xdeadbeef;
      caf->link    = (StgCAF *)0xdeadbeef;
   }

   /* Empty out both the table and the list. */
   usedECafTable = 0;
   ecafList = END_ECAF_LIST;
#endif
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
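
/* A compiled-out sketch (not part of the collector) of the
 * list-filtering idiom used by gcCAFs() above: keeping a pointer to
 * the previous cell's link field (pp) lets a dead cell be unlinked
 * with a single assignment, with no special case for the list head.
 */
#if 0
typedef struct Cell_ { struct Cell_ *link; int dead; } Cell;

static void filter_list( Cell **head )
{
    Cell **pp = head;
    Cell *p;
    while ((p = *pp) != NULL) {
	if (p->dead) {
	    *pp = p->link;	/* unlink: predecessor now skips p   */
	} else {
	    pp = &p->link;	/* keep p: advance to its link field */
	}
    }
}
#endif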
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
	  bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
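
/* The core transformation, previewed as a compiled-out fragment (the
 * real loop below also handles CATCH/SEQ frames, lazy black holing and
 * the word-sliding): two temporally adjacent update frames with
 * nothing between them must deliver the same value, so the bypassed
 * updatee is turned into an indirection to the one we keep, and the
 * redundant frame's worth of stack is added to the displacement by
 * which everything below will be slid up.
 */
#if 0
if (prev_was_update_frame && is_update_frame
    && (P_)prev_frame == frame_bottom + displacement) {
    UPD_IND_NOLOCK(frame->updatee, prev_frame->updatee); /* bypass -> keep */
    displacement += sizeofW(StgUpdateFrame);	/* the frame itself vanishes */
}
#endif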
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;	/* Temporally next */
  StgUpdateFrame *prev_frame;	/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif
  bottom = &(tso->stack[tso->stack_size]);
  frame = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
		       if (frame->updatee->header.info == &stg_BLACKHOLE_info)
			 bhs++;
		       break;
    case STOP_FRAME:   stop_frames++;
		       break;
    case CATCH_FRAME:  catch_frames++;
		       break;
    case SEQ_FRAME:    seq_frames++;
		       break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &stg_BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &stg_BLACKHOLE_info &&
	    bh->header.info != &stg_BLACKHOLE_BQ_info &&
	    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
	  /* zero out the slop so that the sanity checker can tell
	   * where the next closure is.
	   */
	  {
	    StgInfoTable *info = get_itbl(bh);
	    nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
	    for (i = np; i < np + nw; i++) {
	      ((StgClosure *)bh)->payload[i] = 0;
	    }
	  }
#endif
	  SET_INFO(bh,&stg_BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

#if DEBUG
      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement))
#endif

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;

  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS);
}
#endif /* DEBUG */
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list

//@end index