/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.100 2001/03/22 03:51:10 hwloidl Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::
//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
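
/* Illustrative sketch (not part of the collector): pushing a static
 * object onto static_objects via its static link field, in the way
 * described above.  STATIC_LINK and END_OF_STATIC_LIST are the macros
 * used by evacuate() below; the helper name push_static_sketch is
 * made up for illustration only.
 */
#if 0
static void
push_static_sketch( StgClosure *q, const StgInfoTable *info )
{
  /* a non-zero link field means the object is already on the list
   * (which is why the list ends in '1' rather than NULL) */
  if (STATIC_LINK(info,q) != NULL) {
    return;
  }
  /* cons onto the front: scavenging is therefore depth-first */
  STATIC_LINK(info,q) = static_objects;
  static_objects = q;
}
#endif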
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	 /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step * );
static void         scavenge                ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

void revertCAFs   ( void );
void scavengeCAFs ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
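
/* Illustrative sketch (not part of the collector): the destination
 * rule above, written as a hypothetical helper that picks a target
 * step for an object currently in generation g, step s, when
 * collecting up to generation N.  The real code pre-computes this
 * chaining in each step's "to" field; the helper name is made up.
 */
#if 0
static step *
evac_dest_sketch( nat g, nat s )
{
  if (s + 1 < generations[g].n_steps) {
    /* to-space in the next higher step of the same generation */
    return &generations[g].steps[s+1];
  } else if (g < N || g + 1 < RtsFlags.GcFlags.generations) {
    /* last step: promote into the first step of the next generation */
    return &generations[g+1].steps[0];
  } else {
    /* oldest generation: evacuate into its own to-space */
    return &generations[g].steps[s];
  }
}
#endif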
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int gen, st;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* Init stats and print par specific (timing) info */
  PAR_TICKY_PAR_START();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen->no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen  = &generations[g];
      bd->step = stp;
      bd->link = NULL;
      bd->evacuated = 1;	/* it's a to-space block */
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_space = bd;
      stp->to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	bd = allocBlock();
	bd->gen  = &generations[g];
	bd->step = stp;
	bd->link = NULL;
	bd->evacuated = 0;	/* *not* a to-space block */
	stp->hp = bd->start;
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
	stp->hp_bd = bd;
	stp->blocks = bd;
	stp->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_space = NULL;
      stp->to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

#if defined(PAR)
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
  /* Mark all entries on the list of pending fetches */
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity,
	       checkStaticObjects());
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
  loop2:
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	  continue;
	}
	stp = &generations[gen].steps[st];
	evac_gen = gen;
	if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	  scavenge(stp);
	  flag = rtsTrue;
	  goto loop2;
	}
	if (stp->new_large_objects != NULL) {
	  scavenge_large(stp);
	  flag = rtsTrue;
	  goto loop2;
	}
      }
    }

    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += stp->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(stp->blocks);
	  stp->blocks = stp->to_space;
	  stp->n_blocks = stp->to_blocks;
	  stp->to_space = NULL;
	  stp->to_blocks = 0;
	  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	stp->large_objects = stp->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =    oldgen_max_blocks * G
	 *                 ----------------------
	 *                 number of generations - 1
	 *
	 * where G is the generation number.
	 */
	if (g != 0) {
#if 0
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
#endif
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
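
	/* Worked example for the interpolation above (illustrative
	 * numbers, not from the source): with 5 generations and
	 * oldest_gen->max_blocks = 1000, generation 2 would get
	 * 1000 * 2 / (5-1) = 500 blocks; the live assignment above
	 * instead gives every older generation the full 1000.
	 */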
      /* for older generations... */
      } else {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	stp->n_blocks += stp->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
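
    /* Worked example for the adjustment above (illustrative numbers,
     * not from the source): with maxHeapSize = 1000 blocks and
     * blocks = 300 live, adjusted_blocks = 1000 - 2*300 = 400 and
     * pc_free = 400 * 100 / 1000 = 40: a 400-block nursery keeps
     * 2L + nursery within the maximum while leaving 40% of the heap
     * free.
     */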

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();	/* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
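
      /* Worked example for the formula above (illustrative numbers,
       * not from the source): with heapSizeSuggestion = 2000 blocks,
       * needed = 500 and g0s0_pcnt_kept = 25, we get
       * blocks = (2000 - 500) * 100 / 125 = 1200.  A 1200-block
       * nursery keeps about 300 blocks (25%) live, and
       * 1200 + 300 + 500 lands on the 2000-block suggestion.
       */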
    }
  }

  /* mark the garbage collected CAFs as dead */
#if 0 /* doesn't work at the moment */
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  resetNurseries();

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /*  heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
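
/* Illustrative sketch (not part of the collector): the driver shape
 * implied by the invariant above.  scavenge_everything is a made-up
 * name standing for the scavenge fixpoint in GarbageCollect(); each
 * call to traverse_weak_ptr_list() may evacuate more objects, so the
 * two alternate until neither makes progress.
 */
#if 0
  do {
    scavenge_everything();		/* reach the idempotent state */
  } while (traverse_weak_ptr_list());	/* live keys found => more work */
#endif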
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	next = t->global_link;
	*prev = next;
	continue;
      default: ;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

 loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	&& Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    /* dead, as far as we can tell */
    return NULL;
  }
}
//@cindex MarkRoot

StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}

//@cindex addBlock

static void addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen = stp->gen;
  bd->step = stp;

  if (stp->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->to_blocks++;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

//@cindex copy

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

//@cindex copyPart

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is non-zero for a large object that has been
   evacuated, or zero otherwise.
   -------------------------------------------------------------------------- */
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->gen = stp->gen;
  bd->step = stp;
  bd->evacuated = 1;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
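
/* Illustrative sketch (not part of the collector): the conditions
 * above as straight-line code.  M, already_evacuated, src_step and
 * dest are stand-ins introduced for illustration only; in the real
 * evacuate() below this logic is spread across copy(), copyPart(),
 * evacuate_large() and the EVACUATED case.
 */
#if 0
  if (!already_evacuated) {
    if (M >= evac_gen) {
      if (M > N) {
	/* do nothing: object is in a generation we aren't collecting */
      } else {
	dest = src_step->to;			/* normal case */
      }
    } else {
      dest = &generations[evac_gen].steps[0];	/* forced promotion */
    }
  } else {
    if (M < evac_gen) {
      failed_to_evac = rtsTrue;	/* couldn't get the object into evac_gen */
    }
  }
#endif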
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	/* nope */
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL;		/* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case MUT_VAR:
    ASSERT(q->header.info != &stg_MUT_CONS_info);
    /* fall through */
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
  {
    StgWord w = (StgWord)q->payload[0];
    if (q->header.info == Czh_con_info &&
	/* unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE && */
	(StgChar)w <= MAX_CHARLIKE) {
      return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
    }
    if (q->header.info == Izh_con_info &&
	(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
      return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
    }
    /* else, fall through ... */
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

#if defined(PAR)
	/* a copy of the top-level cases below */
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,stp);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,stp);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,stp);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
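
/* Worked example for the DLL tagging above (illustrative address, not
 * from the source): an SRT entry holding 0x401235 has bit 0 set, so
 * it really denotes the import cell at 0x401234; we untag and
 * dereference that cell to reach the closure.  An entry with bit 0
 * clear is dereferenced directly.
 */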
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

//@cindex scavengeTSO

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (stp->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &stg_MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }
    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }
    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
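
/* Example of the situation described above: a MUT_CONS built by
 * mkMutCons() wraps an object that we failed to promote during
 * scavenging; scavenge_mut_once_list() later calls scavenge_one() on
 * its var field to retry the promotion.
 */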
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * else.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
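
/* Note (added commentary): the usual way an object lands on a
 * mut_once list is via mkMutCons() above - when scavenge() fails to
 * promote everything an object points to, it wraps the object in a
 * MUT_CONS and recordOldToNewPtrs() links that MUT_CONS onto the
 * older generation's mut_once_list, which is what we walk here.
 */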
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

#if 0
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &stg_MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
2460 //@cindex scavenge_mutable_list
2463 scavenge_mutable_list(generation *gen)
2465 const StgInfoTable *info;
2466 StgMutClosure *p, *next;
2468 p = gen->saved_mut_list;
2472 failed_to_evac = rtsFalse;
2474 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2476 /* make sure the info pointer is into text space */
2477 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2478 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2482 if (info->type==RBH)
2483 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2485 switch(info->type) {
2487 case MUT_ARR_PTRS_FROZEN:
2488 /* remove this guy from the mutable list, but follow the ptrs
2489 * anyway (and make sure they get promoted to this gen).
2494 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2496 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2497 (StgClosure *)*q = evacuate((StgClosure *)*q);
2501 if (failed_to_evac) {
2502 failed_to_evac = rtsFalse;
2503 p->mut_link = gen->mut_list;
2510 /* follow everything */
2511 p->mut_link = gen->mut_list;
2516 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2517 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2518 (StgClosure *)*q = evacuate((StgClosure *)*q);
    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.  MUT_CONSs live on the mut_once_list, not here,
       * hence the assertion.
       */
      ASSERT(p->header.info != &stg_MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        scavengeTSO(tso);

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = gen->mut_once_list;
        gen->mut_once_list = p;
      } else {
        p->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // char str[80];
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        p += sizeofW(StgBlockedFetch);
        break;
      }

#ifdef DIST
    case REMOTE_REF:
      barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
#endif
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif /* PAR */

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
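
/* Illustrative sketch (not in the original file): the cases above that
 * keep an object mutable all use the same two-line cons onto the front
 * of gen->mut_list; a hypothetical helper would look like this:
 */
#if 0
static void keep_mutable(StgMutClosure *c, generation *gen)
{
    c->mut_link   = gen->mut_list;   /* link to the current head */
    gen->mut_list = c;               /* c becomes the new head   */
}
#endif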
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
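
/* Illustrative sketch (not in the original file): why the loop above
 * re-reads the global static_objects at the bottom instead of following
 * p's own link -- evacuate() may cons freshly reached static closures
 * onto the list while we are draining it:
 */
#if 0
    p = static_objects;
    while (p != END_OF_STATIC_LIST) {
        /* unlink and scavenge p; evacuate() may push new entries
         * onto static_objects in the meantime...                 */
        p = static_objects;   /* ...so restart at the current head */
    }
#endif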
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
        ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
        StgPtr old_p = p;
        p++; p++;
        IF_DEBUG(sanity,
                 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                       old_p, p, old_p+1));
#else
        p++; /* what if FHS!=1 !? -- HWL */
#endif
        goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *stp;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          stp = bd->step;
          if (!(stp->gen->no == 0 &&
                stp->no != 0 &&
                stp->no == stp->gen->n_steps-1)) {
            stp = stp->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), stp);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgPtr q;
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
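
/* Worked example (not in the original file): in the bitmap convention used
 * by small_bitmap above, a *clear* bit marks a pointer slot.  With a
 * hypothetical bitmap of 0x5 (binary 101), slots 0 and 2 hold non-pointers
 * and only slot 1 is evacuated:
 */
#if 0
    StgWord bitmap = 0x5;
    /* slot 0: bitmap & 1 == 1 -> skip (non-pointer)        */
    /* slot 1: bitmap & 1 == 0 -> evacuate((StgClosure*)*p) */
    /* slot 2: bitmap & 1 == 1 -> skip (non-pointer)        */
#endif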
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                  /* most objects are mutable */
  bd = stp->new_large_objects;
  for (; bd != NULL; bd = stp->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    stp->new_large_objects = bd->link;
    dbl_link_onto(bd, &stp->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case AP_UPD:
    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;

        evac_gen = saved_evac_gen; /* not really mutable */
        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        evac_gen = 0;
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
RevertCAFs( void )
{
  StgIndStatic *c;

  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->header.info = c->saved_info;
    c->saved_info = NULL;
    /* could, but not necessary: c->static_link = NULL; */
  }
  caf_list = NULL;
}

void
scavengeCAFs( void )
{
  StgIndStatic *c;

  evac_gen = 0;
  for (c = (StgIndStatic *)caf_list; c != NULL;
       c = (StgIndStatic *)c->static_link)
  {
    c->indirectee = evacuate(c->indirectee);
  }
}
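
/* Note (not in the original file): RevertCAFs assumes that whatever code
 * updated each CAF first stashed the closure's original info table in its
 * saved_info field, so restoring header.info un-updates every CAF on
 * caf_list in one pass.  A hypothetical caller on the interpreter side:
 */
#if 0
    RevertCAFs();   /* all updated CAFs behave as if never entered */
#endif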
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#if defined(DEBUG)
//@cindex gcCAFs
static void
gcCAFs(void)
{
  StgClosure *p, **pp;
  const StgInfoTable *info;
  nat i;

  i = 0; p = caf_list; pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      /* not reachable this GC: black hole it and unlink it */
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;
  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &stg_BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
          bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&stg_BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadPaused");
    }
  }
}
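
/* Illustrative note (not in the original file): the early return above is
 * sound because blackholing always proceeds from the innermost frame
 * (tso->su) towards the stack bottom.  Schematically, with hypothetical
 * frames u0..u3:
 *
 *    tso->su -> u3  not yet blackholed   <- this pause works here
 *               u2  not yet blackholed
 *               u1  BLACKHOLE'd on an earlier pause  <- stop: everything
 *               u0  BLACKHOLE'd                         below is done
 */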
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                   /* Temporally next */
  StgUpdateFrame *prev_frame;                   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
             if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
               printObj((StgClosure *)prev_frame);
               barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
                    frame, prev_frame);
             })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
                       if (frame->updatee->header.info == &stg_BLACKHOLE_info)
                         bhs++;
                       break;
    case STOP_FRAME:  stop_frames++;
                      break;
    case CATCH_FRAME: catch_frames++;
                      break;
    case SEQ_FRAME:   seq_frames++;
                      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
           frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &stg_BLACKHOLE_info) {
        break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#  error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
      if (GET_INFO(updatee_bypass) == &stg_BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == &stg_CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == &stg_BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       *
       * Check two things: that the two update frames don't point to
       * the same object, and that the updatee_bypass isn't already an
       * indirection.  Both of these cases only happen when we're in a
       * black-hole-style loop (and there are multiple update frames
       * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
      if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
        /* this wakes the threads up */
        UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
      }

      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);
    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &stg_BLACKHOLE_info &&
            bh->header.info != &stg_BLACKHOLE_BQ_info &&
            bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
          /* zero out the slop so that the sanity checker can tell
           * where the next closure is.
           */
          {
            StgInfoTable *info = get_itbl(bh);
            nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
            /* don't zero out slop for a THUNK_SELECTOR, because its layout
             * info is used for a different purpose, and it's exactly the
             * same size as a BLACKHOLE in any case.
             */
            if (info->type != THUNK_SELECTOR) {
              for (i = np; i < np + nw; i++) {
                ((StgClosure *)bh)->payload[i] = 0;
              }
            }
          }
#endif
          SET_INFO(bh,&stg_BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
               fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                       displacement))

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
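
/* Worked example (not in the original file): squeezing two adjacent update
 * frames.  Before, with t1 and t2 hypothetical thunks:
 *
 *      ... words to slide ...
 *      UPDATE_FRAME (updatee = t2)    <- frame       (squeezed out)
 *      UPDATE_FRAME (updatee = t1)    <- prev_frame  (kept)
 *      STOP_FRAME
 *
 * UPD_IND_NOLOCK(t2, t1) makes t2 an indirection to t1, so one update now
 * serves both frames; the eliminated frame's words are reclaimed by sliding
 * everything above it up by sizeofW(StgUpdateFrame), which is what
 * `displacement' accumulates.
 */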
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
          info->type == MUT_ARR_PTRS_FROZEN ||
          info->type == TSO ||
          info->type == ARR_WORDS);
}

#endif /* DEBUG */
//@node Index, , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect:: @cindex\s-+GarbageCollect
//* MarkRoot:: @cindex\s-+MarkRoot
//* RevertCAFs:: @cindex\s-+RevertCAFs
//* addBlock:: @cindex\s-+addBlock
//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
//* copy:: @cindex\s-+copy
//* copyPart:: @cindex\s-+copyPart
//* evacuate:: @cindex\s-+evacuate
//* evacuate_large:: @cindex\s-+evacuate_large
//* gcCAFs:: @cindex\s-+gcCAFs
//* isAlive:: @cindex\s-+isAlive
//* maybeLarge:: @cindex\s-+maybeLarge
//* mkMutCons:: @cindex\s-+mkMutCons
//* printMutOnceList:: @cindex\s-+printMutOnceList
//* printMutableList:: @cindex\s-+printMutableList
//* relocate_TSO:: @cindex\s-+relocate_TSO
//* scavenge:: @cindex\s-+scavenge
//* scavenge_large:: @cindex\s-+scavenge_large
//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
//* scavenge_one:: @cindex\s-+scavenge_one
//* scavenge_srt:: @cindex\s-+scavenge_srt
//* scavenge_stack:: @cindex\s-+scavenge_stack
//* scavenge_static:: @cindex\s-+scavenge_static
//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
//* threadPaused:: @cindex\s-+threadPaused
//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee:: @cindex\s-+upd_evacuee
//* zero_mutable_list:: @cindex\s-+zero_mutable_list
//* zero_static_object_list:: @cindex\s-+zero_static_object_list
//@end index