/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.90 2000/12/04 12:31:20 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"           /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
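
/* An illustrative sketch (not part of the collector) of the list
 * discipline described above; the helper name is hypothetical, but
 * STATIC_LINK, static_objects and END_OF_STATIC_LIST are the real
 * macros/variables used below.
 */
#if 0
static void
push_static_object( const StgInfoTable *info, StgClosure *p )
{
  /* on-list test: an object is on a list iff its link field is non-NULL */
  if (STATIC_LINK(info,p) == NULL) {
    STATIC_LINK(info,p) = static_objects;  /* cons onto the front...        */
    static_objects = p;                    /* ...list is terminated by '1'  */
  }
}
#endif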
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's unchanged
 * a lot of the time so we optimise it into a global variable).
 */

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect
/* -----------------------------------------------------------------------------
   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
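
/* A compact restatement of the destination rule above as code.  This is
 * a sketch for exposition only (the helper name is hypothetical; the
 * real logic lives in evacuate()/copy() below, which also handle large
 * objects and static closures):
 */
#if 0
static step *
evac_dest( StgClosure *q )
{
  bdescr *bd = Bdescr((P_)q);
  /* each step's 'to' field is pre-computed to encode the rules above:
   * the next step in the same generation, or step 0 of the next
   * generation.
   */
  step *dest = bd->step->to;
  if (dest->gen->no < evac_gen) {   /* caller demanded an older generation */
    dest = &generations[evac_gen].steps[0];
  }
  return dest;
}
#endif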
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  lnat live, allocated, collected = 0, copied = 0;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  }
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd->gen = &generations[g];
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;

      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */

  /* And don't forget to mark the TSO if we got here direct from
   */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   */
  extern void markHugsObjects(void);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
             checkStaticObjects());

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
      }
      step = &generations[gen].steps[st];
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
      }
      if (step->new_large_objects != NULL) {
        scavenge_large(step);
      }

  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
              RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
           (int)oldest_gen->steps[0].to_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    generations[g].collections++; /* for stats */

    for (s = 0; s < generations[g].n_steps; s++) {

      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        copied -= step->hp_bd->start + BLOCK_SIZE_W -

      /* for generations we collected... */

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
        step->large_objects = step->scavenged_large_objects;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations:
         *
         *               oldest_gen_max_blocks * g
         * max_blocks =  -------------------------
         *                  (n_generations - 1)
         */
        generations[g].max_blocks = (oldest_gen->max_blocks * g)
                                      / (RtsFlags.GcFlags.generations-1);
        generations[g].max_blocks = oldest_gen->max_blocks;

      /* for older generations... */

      /* For older generations, we need to append the
       * scavenged_large_object list (i.e. large objects that have been
       * promoted during this GC) to the large_object list for that step.
       */
      for (bd = step->scavenged_large_objects; bd; bd = next) {
        dbl_link_onto(bd, &step->large_objects);

      /* add the new blocks we promoted during this GC */
      step->n_blocks += step->to_blocks;
  /* Guess the amount of live data for stats. */

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
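
    /* Illustrative numbers for the memory budget above: with L = 100
     * blocks of live data and a 100-block nursery, a collection peaks
     * at roughly 100 (nursery) + 100 (live from-space) + 100 (to-space
     * copy) = 300 blocks, i.e. 3L; shrinking the nursery towards zero
     * brings the peak down towards 2L.
     */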
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr,
               "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n",
               RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        blocks = adjusted_blocks;
      }
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
    }
    if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }
    resizeNursery(blocks);
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {

    nat needed = calcNeeded();  /* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;

    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);

    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }

    resizeNursery((nat)blocks);
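
    /* Worked example with illustrative numbers: suggestion = 1000
     * blocks, needed = 400, g0s0_pcnt_kept = 50:
     *
     *     blocks = (1000 - 400) * 100 / (100 + 50) = 400
     *
     * A 400-block nursery, plus the ~200 blocks (50%) of it expected
     * to survive, plus the 400 blocks needed elsewhere comes back to
     * roughly the suggested 1000 blocks.
     */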
  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  zero_static_object_list(scavenged_static_objects);

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_VISUALS
  if (RtsFlags.GcFlags.visuals) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
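
/* How the caller drives this function (a sketch for exposition only;
 * the real driver is the scavenging loop in GarbageCollect above):
 * scavenging is run to quiescence between calls, which establishes
 * exactly the invariant described above.
 *
 *     do {
 *         ...scavenge to-space and the mutable roots to a fixpoint...
 *     } while (traverse_weak_ptr_list()); // rtsTrue => evacuated something
 */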
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {

      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n",
                             w, w->key));

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
        next = t->global_link;

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      next = tmp->global_link;
      tmp->global_link = all_threads;
      prev = &(t->global_link);
      next = t->global_link;

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {

  switch (info->type) {

  case IND_OLDGEN:      /* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;

    return ((StgEvacuated *)p)->evacuee;

    size = bco_sizeW((StgBCO*)p);

    size = arr_words_sizeW((StgArrWords *)p);

  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);

    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
    }
    size = tso_sizeW((StgTSO *)p);

    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
        && Bdescr((P_)p)->evacuated)
StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
# endif
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;

  if (step->gen->no <= N) {

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {

  for(to = step->hp, from = (P_)src; size>0; --size) {

  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {

  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is non-zero for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);

  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
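
/* For example (see the tail of scavenge() below): when scavenging an
 * object fails to promote one of its children into evac_gen, the
 * object is deferred via a MUT_CONS on the mutable list of
 * generations[evac_gen]:
 *
 *     if (failed_to_evac) {
 *         mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *         failed_to_evac = rtsFalse;
 *     }
 */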
/* -----------------------------------------------------------------------------
   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= evac_gen
           if  M > N    then don't evac
           else         evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in:

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
                        didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  const StgInfoTable *info;

  if (HEAP_ALLOCED(q)) {
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    step = bd->step->to;
  }
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
          q, info_type(q), info, info_type_by_ip(info)));

  switch (info -> type) {

    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
      }
      /* just copy the block */
      to = copy(q,size,step);
    }

    ASSERT(q->header.info != &stg_MUT_CONS_info);
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);

    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          /* unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE && */
          (StgChar)w <= MAX_CHARLIKE) {
        return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
        return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      /* else, fall through ... */
    }

    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

    return copy(q,sizeofW(StgHeader)+2,step);

  case IND_OLDGEN_PERM:
    return copy(q,sizeW_fromITBL(info),step);

  case SE_CAF_BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
             (StgWord32)(selectee_info->layout.payload.ptrs +
                         selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
        }

      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;

        selectee = ((StgCAF *)selectee)->value;

        selectee = ((StgEvacuated *)selectee)->evacuee;

      case THUNK_SELECTOR:
        /* aargh - do recursively???? */

      case SE_CAF_BLACKHOLE:

        /* not evaluated yet */

        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;

    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }

    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }

    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }

    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */

    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
      }
      return copy(q,size,step);
    }

    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
      }
      /* just copy the block */
      return copy(q,size,step);
    }
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
      }
      /* just copy the block */
      to = copy(q,size,step);
      if (info->type == MUT_ARR_PTRS) {
        recordMutable((StgMutClosure *)to);
      }
    }

    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      belch("@@ evacuate: RBH %p (%s) to %p (%s)",
            q, info_type(q), to, info_type(to)));
    }

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    belch("@@ evacuate: %p (%s) to %p (%s)",
          q, info_type(q), to, info_type(to)));

    barf("evacuate: strange closure type %d", (int)(info->type));
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

relocate_TSO(StgTSO *src, StgTSO *dest)
{
  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);

      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);

      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);

      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
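      /* *srt is a tagged pointer to the DLL closure's indirection cell;
       * masking out bit 0 below undoes the "add one" described above
       * before the entry is evacuated.
       */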
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *step)
{
  const StgInfoTable *info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects.
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
      {
        StgBCO* bco = (StgBCO *)p;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
      }

      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
      }

      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

      end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
      for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);
      }
      p += info->layout.payload.nptrs;

      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);

      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        }
        caf->mut_link = NULL;
        p += sizeofW(StgCAF);
      }

      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        }
        caf->mut_link = NULL;
        p += sizeofW(StgCAF);
      }

      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
    case SE_CAF_BLACKHOLE:
      p += BLACKHOLE_sizeW();

      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
      }

      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_NOCAF_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
      }

      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);

      /* follow everything */
      {
        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
      }

      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
      }

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
              p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
      }
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
      }

      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
      }

      barf("scavenge: unimplemented/strange closure type %d @ %p",
      barf("scavenge: unimplemented/strange closure type %d @ %p",

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN_1_0:                 /* hardly worth specialising these guys */
  case IND_OLDGEN_PERM:
    {
      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
    }

  case SE_CAF_BLACKHOLE:

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
    }

  case AP_UPD: /* same as PAPs */
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
    }

    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */

    barf("scavenge_one: strange object %d", (int)(info->type));

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;

  failed_to_evac = rtsFalse;
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN_PERM:

      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;

        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;

      /* the mut_link field of an IND_STATIC is overloaded as the
       * static link field too (it just so happens that we don't need
       * both at the same time), so we need to NULL it out when
       * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;

      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
        }
      }

      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
        }
      }

      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);

        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
        }
      }

      /* follow everything */
      p->mut_link = gen->mut_list;
      {
        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
      }

      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &stg_MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;

      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
      }

      {
        StgTSO *tso = (StgTSO *)p;

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
      }

      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
      }

      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_once_list;
        gen->mut_once_list = p;
      }
2634 // HWL: check whether all of these are necessary
2636 case RBH: // cf. BLACKHOLE_BQ
2638 // nat size, ptrs, nonptrs, vhs;
2640 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2641 StgRBH *rbh = (StgRBH *)p;
2642 (StgClosure *)rbh->blocking_queue =
2643 evacuate((StgClosure *)rbh->blocking_queue);
2644 if (failed_to_evac) {
2645 failed_to_evac = rtsFalse;
2646 recordMutable((StgMutClosure *)rbh);
2648 // ToDo: use size of reverted closure here!
2649 p += BLACKHOLE_sizeW();
2655 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2656 /* follow the pointer to the node which is being demanded */
2657 (StgClosure *)bf->node =
2658 evacuate((StgClosure *)bf->node);
2659 /* follow the link to the rest of the blocking queue */
2660 (StgClosure *)bf->link =
2661 evacuate((StgClosure *)bf->link);
2662 if (failed_to_evac) {
2663 failed_to_evac = rtsFalse;
2664 recordMutable((StgMutClosure *)bf);
2666 p += sizeofW(StgBlockedFetch);
2671 p += sizeofW(StgFetchMe);
2672 break; // nothing to do in this case
2674 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2676 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2677 (StgClosure *)fmbq->blocking_queue =
2678 evacuate((StgClosure *)fmbq->blocking_queue);
2679 if (failed_to_evac) {
2680 failed_to_evac = rtsFalse;
2681 recordMutable((StgMutClosure *)fmbq);
2683 p += sizeofW(StgFetchMeBlockingQueue);
2689 /* shouldn't have anything else on the mutables list */
2690 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
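
/* Sketch (not part of the original file) of the push protocol the cases
 * above follow when an object must remain mutable: link it onto the front
 * of the generation's mut_list.  recordMutable() performs essentially this
 * operation for the generation the closure actually lives in.
 */
#if 0
static void
push_on_mut_list(generation *gen, StgMutClosure *p)
{
  p->mut_link   = gen->mut_list;  /* cons onto the front... */
  gen->mut_list = p;              /* ...so the list needs no tail pointer */
}
#endif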
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
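
/* Sketch (not part of the original file) of the invariant driving the
 * loop above: every static object is on exactly one of the two lists, so
 * even though evacuate() may push new entries onto static_objects while
 * we scavenge, re-reading the list head after each object still
 * terminates once nothing new has been evacuated.
 */
#if 0
  while (static_objects != END_OF_STATIC_LIST) {
    StgClosure *p = static_objects;
    /* unlink p from static_objects, push it onto
     * scavenged_static_objects, then scavenge p; scavenging may grow
     * static_objects again, which is why the head is re-read here. */
  }
#endif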
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(StgPtr *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
        ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * (mostly).
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
        StgPtr old_p = p;
        p++; p++;
        IF_DEBUG(sanity,
                 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                       old_p, p, old_p+1));
#else
        p++; /* what if FHS!=1 !? -- HWL */
#endif
        goto follow_srt;
      }

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *step;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          step = bd->step;
          if (!(step->gen->no == 0 &&
                step->no != 0 &&
                step->no == step->gen->n_steps-1)) {
            step = step->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }

      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
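
/* Worked example (not from the original source) of the small-bitmap
 * convention used in scavenge_stack: bit i describes stack word p[i],
 * where a 0 bit means "pointer, evacuate it" and a 1 bit means
 * "non-pointer, skip it".  With bitmap 0b0110, words p[0] and p[3] are
 * treated as pointers and p[1], p[2] are skipped.
 */
#if 0
static StgPtr
walk_small_bitmap(StgPtr p, StgWord bitmap)
{
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) {
      (StgClosure *)*p = evacuate((StgClosure *)*p); /* pointer word */
    }
    p++;                        /* non-pointer words are simply skipped */
    bitmap = bitmap >> 1;
  }
  return p;                     /* first word past the described payload */
}
#endif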
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                  /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }

    case BCO:
      {
        StgBCO* bco = (StgBCO *)p;
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;

        evac_gen = saved_evac_gen; /* not really mutable */
        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        evac_gen = 0;
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
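
/* Sketch (not part of the original file) of the list-as-stack idiom in
 * scavenge_large: because evacuate() can push freshly evacuated large
 * objects onto step->new_large_objects while we work, the loop re-reads
 * the list head on every iteration instead of walking a cached bd->link.
 */
#if 0
  while (step->new_large_objects != NULL) {
    bdescr *bd = step->new_large_objects;
    step->new_large_objects = bd->link;                /* pop the head... */
    dbl_link_onto(bd, &step->scavenged_large_objects); /* ...and retire it */
    /* scavenging the object in bd may push more blocks onto the list */
  }
#endif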
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
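
/* The order of the two assignments in the loop above matters.  A sketch
 * (not in the original) of the bug that saving the link first avoids:
 */
#if 0
  for (c = first; c != END_MUT_LIST; c = next) {
    c->mut_link = NULL;    /* WRONG: destroys the link... */
    next = c->mut_link;    /* ...so next is NULL and the walk stops early */
  }
#endif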
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
  nat i;

  /* Deal with CAFs created by compiled code. */
  for (i = 0; i < usedECafTable; i++) {
    SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
    ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
  }

  /* Deal with CAFs created by the interpreter. */
  while (ecafList != END_ECAF_LIST) {
    StgCAF* caf = ecafList;
    ecafList = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }

  /* Empty out both the table and the list. */
  usedECafTable = 0;
  ecafList = END_ECAF_LIST;
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
//@cindex gcCAFs

#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure *p, **pp;
  const StgInfoTable *info;
  nat i = 0;

  for (p = caf_list, pp = &caf_list; p != NULL; ) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&stg_BLACKHOLE_info);  /* dead CAF: entering it is an error */
      p = STATIC_LINK2(info,p);
      *pp = p;                          /* unlink it from the CAF list */
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }
  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
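
/* Sketch (not part of the original file) of the liveness test gcCAFs
 * relies on: after a major GC every static link field is either non-NULL
 * (the CAF was reached and threaded onto the static object lists) or
 * still NULL (the CAF is dead and may safely be black-holed).
 */
#if 0
  if (STATIC_LINK(info,p) == NULL) {
    SET_INFO(p, &stg_BLACKHOLE_info);  /* entering it again is an error */
  }
#endif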
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
          bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole: unexpected frame type");
    }
  }
}
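
/* Why stopping at the first BLACKHOLE is sound (a sketch, not part of the
 * original): each pause black-holes frames from the top of the stack
 * downwards, so an update frame whose updatee is already a BLACKHOLE must
 * have been processed by an earlier pause, along with every frame below
 * it.  Ignoring CATCH/SEQ frames, the walk is essentially:
 */
#if 0
  StgUpdateFrame *uf;
  for (uf = tso->su; get_itbl(uf)->type == UPDATE_FRAME; uf = uf->link) {
    if (uf->updatee->header.info == &stg_BLACKHOLE_info)
      break;                       /* everything below was done last time */
    SET_INFO(uf->updatee, &stg_BLACKHOLE_info);
  }
#endif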
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                   /* Temporally next */
  StgUpdateFrame *prev_frame;                   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#ifdef DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  frame  = tso->su;
  bottom = &(tso->stack[tso->stack_size]);

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#ifdef DEBUG
    if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
      printObj((StgClosure *)prev_frame);
      barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
           frame, prev_frame);
    }
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
      if (frame->updatee->header.info == &stg_BLACKHOLE_info)
        bhs++;
      break;
    case STOP_FRAME:  stop_frames++;
      break;
    case CATCH_FRAME: catch_frames++;
      break;
    case SEQ_FRAME:   seq_frames++;
      break;
    default:
      printObj((StgClosure *)prev_frame);
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
           frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &stg_BLACKHOLE_info) {
      break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* We can squeeze out the current frame when:
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#ifdef DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#  error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;        /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;     /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &stg_BLACKHOLE_info &&
            bh->header.info != &stg_BLACKHOLE_BQ_info &&
            bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          /* zero out the slop so that the sanity checker can tell
           * where the next closure is.
           */
          {
            StgInfoTable *info = get_itbl(bh);
            nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
            for (i = np; i < np + nw; i++) {
              ((StgClosure *)bh)->payload[i] = 0;
            }
          }
          SET_INFO(bh,&stg_BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
               fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                       displacement))

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
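
/* Worked example (not from the original) of the adjacency test
 * "(P_)prev_frame == frame_bottom + displacement" above.  Suppose
 * sizeofW(StgUpdateFrame) == 3 and an update frame starts at stack word
 * 100, so frame_bottom == 103.  With displacement == 0, a prev_frame at
 * word 103 sits immediately above it: the current frame is "empty" and
 * can be bypassed.  After that squeeze, displacement == 3, so a
 * prev_frame at word 106 (one squeezed-out frame away in the old
 * coordinates) now also tests as adjacent, which is exactly what the
 * sliding phase at the end of the loop expects.
 */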
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
          info->type == MUT_ARR_PTRS_FROZEN ||
          info->type == TSO ||
          info->type == ARR_WORDS ||
          info->type == BCO);
}
#endif /* DEBUG */
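
/* A hypothetical use of maybeLarge (not in the original file):
 * sanity-checking that only closure types scavenge_large understands
 * appear on a step's new_large_objects list.
 */
#if 0
  for (bd = step->new_large_objects; bd != NULL; bd = bd->link) {
    ASSERT(maybeLarge((StgClosure *)bd->start));
  }
#endif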
//@node Index,  , Pausing a thread
//@subsection Index

//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list