1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.61 1999/08/25 16:11:46 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
14 #include "StoragePriv.h"
17 #include "SchedAPI.h" /* for ReverCAFs prototype */
20 #include "BlockAlloc.h"
22 #include "DebugProf.h"
25 #include "StablePriv.h"
29 /* STATIC OBJECT LIST.
32 * We maintain a linked list of static objects that are still live.
33 * The requirements for this list are:
35 * - we need to scan the list while adding to it, in order to
36 * scavenge all the static objects (in the same way that
37 * breadth-first scavenging works for dynamic objects).
39 * - we need to be able to tell whether an object is already on
40 * the list, to break loops.
42 * Each static object has a "static link field", which we use for
43 * linking objects on to the list. We use a stack-type list, consing
44 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
48 * A separate list is kept for objects that have been scavenged
49 * already - this is so that we can zero all the marks afterwards.
51 * An object is on the list if its static link field is non-zero; this
52 * means that we have to mark the end of the list with '1', not NULL.
54 * Extra notes for generational GC:
56 * Each generation has a static object list associated with it. When
57 * collecting generations up to N, we treat the static object lists
58 * from generations > N as roots.
60 * We build up a static object list while collecting generations 0..N,
61 * which is then appended to the static object list of generation N+1.
63 StgClosure* static_objects; /* live static objects */
64 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
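
/* Illustrative sketch (hypothetical toy types, not RTS code, kept under
 * #if 0 so it is never compiled): the list discipline described above.
 * END_MARK plays the role of '1': a non-NULL link field means "already
 * on the list", so the end marker must not be NULL.
 */
#if 0
typedef struct Node_ { struct Node_ *static_link; } Node;

#define END_MARK ((Node *)1)

static Node *live_list = END_MARK;

static void add_static(Node *n)
{
  if (n->static_link == NULL) {   /* not yet on the list: link is zero */
    n->static_link = live_list;   /* cons onto the front (stack-like) */
    live_list = n;
  }
}
#endif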
66 /* N is the oldest generation being collected, where the generations
67 * are numbered starting at 0. A major GC (indicated by the major_gc
68 * flag) is when we're collecting all generations. We only attempt to
69 * deal with static objects and GC CAFs when doing a major GC.
72 static rtsBool major_gc;
74 /* Youngest generation that objects should be evacuated to in
75 * evacuate(). (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
82 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
83 static rtsBool weak_done; /* all done for this pass */
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
88 static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;
94 /* Data used for allocation area sizing.
96 lnat new_blocks; /* blocks allocated during this GC */
97 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
99 /* -----------------------------------------------------------------------------
100 Static function declarations
101 -------------------------------------------------------------------------- */
103 static StgClosure * evacuate ( StgClosure *q );
104 static void zero_static_object_list ( StgClosure* first_static );
105 static void zero_mutable_list ( StgMutClosure *first );
106 static void revert_dead_CAFs ( void );
108 static rtsBool traverse_weak_ptr_list ( void );
109 static void cleanup_weak_ptr_list ( StgWeak **list );
111 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
112 static void scavenge_large ( step *step );
113 static void scavenge ( step *step );
114 static void scavenge_static ( void );
115 static void scavenge_mutable_list ( generation *g );
116 static void scavenge_mut_once_list ( generation *g );
119 static void gcCAFs ( void );
122 /* -----------------------------------------------------------------------------
125 For garbage collecting generation N (and all younger generations):
   - follow all pointers in the root set.  The root set includes all
128 mutable objects in all steps in all generations.
130 - for each pointer, evacuate the object it points to into either
131 + to-space in the next higher step in that generation, if one exists,
132 + if the object's generation == N, then evacuate it to the next
       generation if one exists, or else to-space in the current
       generation.
135 + if the object's generation < N, then evacuate it to to-space
136 in the next generation.
138 - repeatedly scavenge to-space from each step in each generation
139 being collected until no more objects can be evacuated.
141 - free from-space in each step, and set from-space = to-space.
143 -------------------------------------------------------------------------- */
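
/* Illustrative sketch (hypothetical names, not RTS code, not compiled):
 * the breadth-first copy/scavenge fixpoint described above, for a toy
 * heap whose objects are all two-field cells and with no overflow
 * checks.  The scan pointer chases the allocation pointer until no new
 * objects appear.
 */
#if 0
typedef struct Cell_ {
  struct Cell_ *f0, *f1;          /* the two pointer fields */
  int forwarded;                  /* set once the cell has been copied */
} Cell;

static Cell  to_space_[1024];
static Cell *hp_   = to_space_;   /* allocation pointer in to-space */
static Cell *scan_ = to_space_;   /* scavenge pointer trailing hp_ */

static Cell *evac_(Cell *p)
{
  Cell *dest;
  if (p == NULL)    return NULL;
  if (p->forwarded) return p->f0; /* already copied: forwarding pointer */
  dest = hp_++;                   /* copy into to-space */
  *dest = *p;
  dest->forwarded = 0;
  p->forwarded = 1;               /* leave a forwarding pointer behind */
  p->f0 = dest;
  return dest;
}

static void scavenge_(void)
{
  while (scan_ < hp_) {           /* evac_ may push hp_ further out */
    scan_->f0 = evac_(scan_->f0);
    scan_->f1 = evac_(scan_->f1);
    scan_++;
  }
}
#endif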
145 void GarbageCollect(void (*get_roots)(void))
149 lnat live, allocated, collected = 0, copied = 0;
153 CostCentreStack *prev_CCS;
156 /* tell the stats department that we've started a GC */
159 /* attribute any costs to CCS_GC */
165 /* We might have been called from Haskell land by _ccall_GC, in
166 * which case we need to call threadPaused() because the scheduler
167 * won't have done it.
169 if (CurrentTSO) { threadPaused(CurrentTSO); }
171 /* Approximate how much we allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
173 * This leaves a little slop at the end of each block, and doesn't
174 * take into account large objects (ToDo).
176 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
177 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
178 allocated -= BLOCK_SIZE_W;
180 if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
181 allocated -= (current_nursery->start + BLOCK_SIZE_W)
182 - current_nursery->free;
185 /* Figure out which generation to collect
188 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
189 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
193 major_gc = (N == RtsFlags.GcFlags.generations-1);
195 /* check stack sanity *before* GC (ToDo: check all threads) */
196 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
197 IF_DEBUG(sanity, checkFreeListSanity());
199 /* Initialise the static object lists
201 static_objects = END_OF_STATIC_LIST;
202 scavenged_static_objects = END_OF_STATIC_LIST;
204 /* zero the mutable list for the oldest generation (see comment by
205 * zero_mutable_list below).
208 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
211 /* Save the old to-space if we're doing a two-space collection
213 if (RtsFlags.GcFlags.generations == 1) {
214 old_to_space = g0s0->to_space;
215 g0s0->to_space = NULL;
218 /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
226 for (g = 0; g <= N; g++) {
227 generations[g].mut_once_list = END_MUT_LIST;
228 generations[g].mut_list = END_MUT_LIST;
230 for (s = 0; s < generations[g].n_steps; s++) {
232 /* generation 0, step 0 doesn't need to-space */
233 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
241 step = &generations[g].steps[s];
242 ASSERT(step->gen->no == g);
243 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
244 bd->gen = &generations[g];
247 bd->evacuated = 1; /* it's a to-space block */
248 step->hp = bd->start;
249 step->hpLim = step->hp + BLOCK_SIZE_W;
253 step->scan = bd->start;
255 step->new_large_objects = NULL;
256 step->scavenged_large_objects = NULL;
258 /* mark the large objects as not evacuated yet */
259 for (bd = step->large_objects; bd; bd = bd->link) {
265 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
268 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
269 for (s = 0; s < generations[g].n_steps; s++) {
270 step = &generations[g].steps[s];
271 if (step->hp_bd == NULL) {
273 bd->gen = &generations[g];
276 bd->evacuated = 0; /* *not* a to-space block */
277 step->hp = bd->start;
278 step->hpLim = step->hp + BLOCK_SIZE_W;
284 /* Set the scan pointer for older generations: remember we
285 * still have to scavenge objects that have been promoted. */
286 step->scan = step->hp;
287 step->scan_bd = step->hp_bd;
288 step->to_space = NULL;
290 step->new_large_objects = NULL;
291 step->scavenged_large_objects = NULL;
295 /* -----------------------------------------------------------------------
296 * follow all the roots that we know about:
297 * - mutable lists from each generation > N
298 * we want to *scavenge* these roots, not evacuate them: they're not
299 * going to move in this GC.
300 * Also: do them in reverse generation order. This is because we
301 * often want to promote objects that are pointed to by older
302 * generations early, so we don't have to repeatedly copy them.
303 * Doing the generations in reverse order ensures that we don't end
304 * up in the situation where we want to evac an object to gen 3 and
305 * it has already been evaced to gen 2.
309 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
310 generations[g].saved_mut_list = generations[g].mut_list;
311 generations[g].mut_list = END_MUT_LIST;
314 /* Do the mut-once lists first */
315 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
316 scavenge_mut_once_list(&generations[g]);
318 for (st = generations[g].n_steps-1; st >= 0; st--) {
319 scavenge(&generations[g].steps[st]);
323 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
324 scavenge_mutable_list(&generations[g]);
326 for (st = generations[g].n_steps-1; st >= 0; st--) {
327 scavenge(&generations[g].steps[st]);
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
340 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
346 old_weak_ptr_list = weak_ptr_list;
347 weak_ptr_list = NULL;
348 weak_done = rtsFalse;
350 /* Mark the stable pointer table.
352 markStablePtrTable(major_gc);
356 /* ToDo: To fix the caf leak, we need to make the commented out
357 * parts of this code do something sensible - as described in
360 extern void markHugsObjects(void);
365 /* -------------------------------------------------------------------------
366 * Repeatedly scavenge all the areas we know about until there's no
367 * more scavenging to be done.
374 /* scavenge static objects */
375 if (major_gc && static_objects != END_OF_STATIC_LIST) {
379 /* When scavenging the older generations: Objects may have been
380 * evacuated from generations <= N into older generations, and we
381 * need to scavenge these objects. We're going to try to ensure that
382 * any evacuations that occur move the objects into at least the
383 * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */
388 /* scavenge each step in generations 0..maxgen */
392 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
393 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
394 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
397 step = &generations[gen].steps[st];
399 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
404 if (step->new_large_objects != NULL) {
405 scavenge_large(step);
412 if (flag) { goto loop; }
414 /* must be last... */
415 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
420 /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
423 cleanup_weak_ptr_list(&weak_ptr_list);
425 /* Now see which stable names are still alive.
427 gcStablePtrTable(major_gc);
  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
440 if (RtsFlags.GcFlags.generations > 1) {
442 oldest_gen->max_blocks =
443 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
444 RtsFlags.GcFlags.minOldGenSize);
445 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
446 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
447 if (((int)oldest_gen->max_blocks -
448 (int)oldest_gen->steps[0].to_blocks) <
449 (RtsFlags.GcFlags.pcFreeHeap *
450 RtsFlags.GcFlags.maxHeapSize / 200)) {
457 /* run through all the generations/steps and tidy up
459 copied = new_blocks * BLOCK_SIZE_W;
460 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
463 generations[g].collections++; /* for stats */
466 for (s = 0; s < generations[g].n_steps; s++) {
468 step = &generations[g].steps[s];
470 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
471 /* Tidy the end of the to-space chains */
472 step->hp_bd->free = step->hp;
473 step->hp_bd->link = NULL;
474 /* stats information: how much we copied */
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp_bd->free;
481 /* for generations we collected... */
484 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
486 /* free old memory and shift to-space into from-space for all
487 * the collected steps (except the allocation area). These
       * freed blocks will probably be quickly recycled.
490 if (!(g == 0 && s == 0)) {
491 freeChain(step->blocks);
492 step->blocks = step->to_space;
493 step->n_blocks = step->to_blocks;
494 step->to_space = NULL;
496 for (bd = step->blocks; bd != NULL; bd = bd->link) {
497 bd->evacuated = 0; /* now from-space */
501 /* LARGE OBJECTS. The current live large objects are chained on
502 * scavenged_large, having been moved during garbage
503 * collection from large_objects. Any objects left on
504 * large_objects list are therefore dead, so we free them here.
506 for (bd = step->large_objects; bd != NULL; bd = next) {
511 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
514 step->large_objects = step->scavenged_large_objects;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *                oldgen_max_blocks * G
	 * max_blocks =  -----------------------
	 *                 (number of gens - 1)
	 *
	 * where G is this generation's number; see the code below.
	 */
526 generations[g].max_blocks = (oldest_gen->max_blocks * g)
527 / (RtsFlags.GcFlags.generations-1);
529 generations[g].max_blocks = oldest_gen->max_blocks;
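	/* Worked example (illustrative numbers): with 5 generations and
	 * oldest_gen->max_blocks == 400, generation 2 is given
	 * 400 * 2 / (5-1) = 200 blocks.
	 */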
532 /* for older generations... */
535 /* For older generations, we need to append the
536 * scavenged_large_object list (i.e. large objects that have been
537 * promoted during this GC) to the large_object list for that step.
539 for (bd = step->scavenged_large_objects; bd; bd = next) {
542 dbl_link_onto(bd, &step->large_objects);
545 /* add the new blocks we promoted during this GC */
546 step->n_blocks += step->to_blocks;
  /* Guess the amount of live data for stats. */
  live = calcLive();
554 /* Free the small objects allocated via allocate(), since this will
555 * all have been copied into G0S1 now.
557 if (small_alloc_list != NULL) {
558 freeChain(small_alloc_list);
560 small_alloc_list = NULL;
564 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
566 /* Two-space collector:
567 * Free the old to-space, and estimate the amount of live data.
569 if (RtsFlags.GcFlags.generations == 1) {
572 if (old_to_space != NULL) {
573 freeChain(old_to_space);
575 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
576 bd->evacuated = 0; /* now from-space */
579 /* For a two-space collector, we need to resize the nursery. */
581 /* set up a new nursery. Allocate a nursery size based on a
582 * function of the amount of live data (currently a factor of 2,
583 * should be configurable (ToDo)). Use the blocks from the old
584 * nursery if possible, freeing up any left over blocks.
586 * If we get near the maximum heap size, then adjust our nursery
587 * size accordingly. If the nursery is the same size as the live
588 * data (L), then we need 3L bytes. We can reduce the size of the
589 * nursery to bring the required memory down near 2L bytes.
591 * A normal 2-space collector would need 4L bytes to give the same
592 * performance we get from 3L bytes, reducing to the same
593 * performance at 2L bytes.
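   *
   * Worked example (illustrative numbers, following the text above):
   * with L = 100 blocks of live data, a 100-block nursery needs a peak
   * of about 3L = 300 blocks; shrinking the nursery moves that
   * requirement down towards 2L = 200 blocks.
   */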
595 blocks = g0s0->to_blocks;
597 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
598 RtsFlags.GcFlags.maxHeapSize ) {
599 int adjusted_blocks; /* signed on purpose */
602 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
603 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
604 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
605 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
608 blocks = adjusted_blocks;
611 blocks *= RtsFlags.GcFlags.oldGenFactor;
612 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
613 blocks = RtsFlags.GcFlags.minAllocAreaSize;
616 resizeNursery(blocks);
619 /* Generational collector:
620 * If the user has given us a suggested heap size, adjust our
621 * allocation area to make best use of the memory available.
624 if (RtsFlags.GcFlags.heapSizeSuggestion) {
626 nat needed = calcNeeded(); /* approx blocks needed at next GC */
628 /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
630 * percentage of g0s0 that was live at the last minor GC.
633 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
636 /* Estimate a size for the allocation area based on the
637 * information available. We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                1 + g0s0_pcnt_kept/100
645 * where 'needed' is the amount of memory needed at the next
646 * collection for collecting all steps except g0s0.
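     *
     * Worked example (illustrative numbers): suggested = 1000 blocks,
     * needed = 200 and g0s0_pcnt_kept = 25 gives
     * blocks = (1000 - 200) * 100 / 125 = 640.
     */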
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
650 (100 + (int)g0s0_pcnt_kept);
652 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
653 blocks = RtsFlags.GcFlags.minAllocAreaSize;
656 resizeNursery((nat)blocks);
660 /* mark the garbage collected CAFs as dead */
662 if (major_gc) { gcCAFs(); }
665 /* zero the scavenged static object list */
667 zero_static_object_list(scavenged_static_objects);
672 for (bd = g0s0->blocks; bd; bd = bd->link) {
673 bd->free = bd->start;
674 ASSERT(bd->gen == g0);
675 ASSERT(bd->step == g0s0);
676 IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
678 current_nursery = g0s0->blocks;
680 /* start any pending finalizers */
681 scheduleFinalizers(old_weak_ptr_list);
683 /* check sanity after GC */
684 IF_DEBUG(sanity, checkSanity(N));
686 /* extra GC trace info */
687 IF_DEBUG(gc, stat_describe_gens());
690 /* symbol-table based profiling */
691 /* heapCensus(to_space); */ /* ToDo */
694 /* restore enclosing cost centre */
699 /* check for memory leaks if sanity checking is on */
700 IF_DEBUG(sanity, memInventory());
702 /* ok, GC over: tell the stats department what happened. */
703 stat_endGC(allocated, collected, live, copied, N);
706 /* -----------------------------------------------------------------------------
709 traverse_weak_ptr_list is called possibly many times during garbage
710 collection. It returns a flag indicating whether it did any work
711 (i.e. called evacuate on any live pointers).
713 Invariant: traverse_weak_ptr_list is called when the heap is in an
714 idempotent state. That means that there are no pending
715 evacuate/scavenge operations. This invariant helps the weak
716 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
720 For generational GC: we just don't try to finalize weak pointers in
721 older generations than the one we're collecting. This could
722 probably be optimised by keeping per-generation lists of weak
723 pointers, but for a few weak pointers this scheme will work.
724 -------------------------------------------------------------------------- */
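
/* Illustrative sketch (hypothetical toy types, not RTS code, not
 * compiled): the shape of the fixpoint computed by
 * traverse_weak_ptr_list.  A live key keeps its value alive, which may
 * in turn make further keys alive, so the caller re-runs this pass
 * until it reports no progress.
 */
#if 0
typedef struct WeakToy_ {
  void *key, *value;
  struct WeakToy_ *link;
} WeakToy;

static int weak_pass(WeakToy **pending, WeakToy **live,
                     int (*alive)(void *), void *(*evac)(void *))
{
  int progress = 0;
  WeakToy **last = pending, *w, *next;

  for (w = *pending; w != NULL; w = next) {
    next = w->link;
    if (alive(w->key)) {
      w->value = evac(w->value);  /* key alive: keep the value alive */
      *last = next;               /* unlink from the pending list */
      w->link = *live;            /* move onto the live list */
      *live = w;
      progress = 1;               /* may have made more keys alive */
    } else {
      last = &w->link;            /* still pending: leave it here */
    }
  }
  return progress;                /* caller loops until this is 0 */
}
#endif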
727 traverse_weak_ptr_list(void)
729 StgWeak *w, **last_w, *next_w;
731 rtsBool flag = rtsFalse;
733 if (weak_done) { return rtsFalse; }
735 /* doesn't matter where we evacuate values/finalizers to, since
736 * these pointers are treated as roots (iff the keys are alive).
740 last_w = &old_weak_ptr_list;
741 for (w = old_weak_ptr_list; w; w = next_w) {
743 /* First, this weak pointer might have been evacuated. If so,
744 * remove the forwarding pointer from the weak_ptr_list.
746 if (get_itbl(w)->type == EVACUATED) {
747 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
751 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
752 * called on a live weak pointer object. Just remove it.
754 if (w->header.info == &DEAD_WEAK_info) {
755 next_w = ((StgDeadWeak *)w)->link;
760 ASSERT(get_itbl(w)->type == WEAK);
762 /* Now, check whether the key is reachable.
764 if ((new = isAlive(w->key))) {
766 /* evacuate the value and finalizer */
767 w->value = evacuate(w->value);
768 w->finalizer = evacuate(w->finalizer);
769 /* remove this weak ptr from the old_weak_ptr list */
771 /* and put it on the new weak ptr list */
773 w->link = weak_ptr_list;
776 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
786 /* If we didn't make any changes, then we can go round and kill all
787 * the dead weak pointers. The old_weak_ptr list is used as a list
788 * of pending finalizers later on.
790 if (flag == rtsFalse) {
791 cleanup_weak_ptr_list(&old_weak_ptr_list);
792 for (w = old_weak_ptr_list; w; w = w->link) {
793 w->finalizer = evacuate(w->finalizer);
801 /* -----------------------------------------------------------------------------
802 After GC, the live weak pointer list may have forwarding pointers
803 on it, because a weak pointer object was evacuated after being
804 moved to the live weak pointer list. We remove those forwarding
807 Also, we don't consider weak pointer objects to be reachable, but
808 we must nevertheless consider them to be "live" and retain them.
809 Therefore any weak pointer objects which haven't as yet been
810 evacuated need to be evacuated now.
811 -------------------------------------------------------------------------- */
814 cleanup_weak_ptr_list ( StgWeak **list )
816 StgWeak *w, **last_w;
819 for (w = *list; w; w = w->link) {
821 if (get_itbl(w)->type == EVACUATED) {
822 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
826 if (Bdescr((P_)w)->evacuated == 0) {
827 (StgClosure *)w = evacuate((StgClosure *)w);
834 /* -----------------------------------------------------------------------------
835 isAlive determines whether the given closure is still alive (after
836 a garbage collection) or not. It returns the new address of the
837 closure if it is alive, or NULL otherwise.
838 -------------------------------------------------------------------------- */
841 isAlive(StgClosure *p)
843 const StgInfoTable *info;
849 /* ToDo: for static closures, check the static link field.
850 * Problem here is that we sometimes don't set the link field, eg.
851 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
854 /* ignore closures in generations that we're not collecting. */
855 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
859 switch (info->type) {
864 case IND_OLDGEN: /* rely on compatible layout with StgInd */
865 case IND_OLDGEN_PERM:
866 /* follow indirections */
867 p = ((StgInd *)p)->indirectee;
872 return ((StgEvacuated *)p)->evacuee;
882 MarkRoot(StgClosure *root)
884 return evacuate(root);
887 static void addBlock(step *step)
889 bdescr *bd = allocBlock();
893 if (step->gen->no <= N) {
899 step->hp_bd->free = step->hp;
900 step->hp_bd->link = bd;
901 step->hp = bd->start;
902 step->hpLim = step->hp + BLOCK_SIZE_W;
908 static __inline__ void
909 upd_evacuee(StgClosure *p, StgClosure *dest)
911 p->header.info = &EVACUATED_info;
912 ((StgEvacuated *)p)->evacuee = dest;
915 static __inline__ StgClosure *
916 copy(StgClosure *src, nat size, step *step)
920 TICK_GC_WORDS_COPIED(size);
921 /* Find out where we're going, using the handy "to" pointer in
922 * the step of the source object. If it turns out we need to
923 * evacuate to an older generation, adjust it here (see comment
926 if (step->gen->no < evac_gen) {
927 #ifdef NO_EAGER_PROMOTION
928 failed_to_evac = rtsTrue;
930 step = &generations[evac_gen].steps[0];
934 /* chain a new block onto the to-space for the destination step if
937 if (step->hp + size >= step->hpLim) {
  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
947 upd_evacuee(src,(StgClosure *)dest);
948 return (StgClosure *)dest;
951 /* Special version of copy() for when we only want to copy the info
952 * pointer of an object, but reserve some padding after it. This is
953 * used to optimise evacuation of BLACKHOLEs.
956 static __inline__ StgClosure *
957 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
961 TICK_GC_WORDS_COPIED(size_to_copy);
962 if (step->gen->no < evac_gen) {
963 #ifdef NO_EAGER_PROMOTION
964 failed_to_evac = rtsTrue;
966 step = &generations[evac_gen].steps[0];
970 if (step->hp + size_to_reserve >= step->hpLim) {
  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
979 step->hp += size_to_reserve;
980 upd_evacuee(src,(StgClosure *)dest);
981 return (StgClosure *)dest;
984 /* -----------------------------------------------------------------------------
985 Evacuate a large object
987 This just consists of removing the object from the (doubly-linked)
988 large_alloc_list, and linking it on to the (singly-linked)
989 new_large_objects list, from where it will be scavenged later.
991 Convention: bd->evacuated is /= 0 for a large object that has been
992 evacuated, or 0 otherwise.
993 -------------------------------------------------------------------------- */
996 evacuate_large(StgPtr p, rtsBool mutable)
  bdescr *bd = Bdescr(p);
  step *step = bd->step;
1001 /* should point to the beginning of the block */
1002 ASSERT(((W_)p & BLOCK_MASK) == 0);
1004 /* already evacuated? */
1005 if (bd->evacuated) {
1006 /* Don't forget to set the failed_to_evac flag if we didn't get
1007 * the desired destination (see comments in evacuate()).
1009 if (bd->gen->no < evac_gen) {
1010 failed_to_evac = rtsTrue;
1011 TICK_GC_FAILED_PROMOTION();
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }
1027 /* link it on to the evacuated large object list of the destination step
1029 step = bd->step->to;
1030 if (step->gen->no < evac_gen) {
1031 #ifdef NO_EAGER_PROMOTION
1032 failed_to_evac = rtsTrue;
1034 step = &generations[evac_gen].steps[0];
1039 bd->gen = step->gen;
1040 bd->link = step->new_large_objects;
1041 step->new_large_objects = bd;
1045 recordMutable((StgMutClosure *)p);
1049 /* -----------------------------------------------------------------------------
1050 Adding a MUT_CONS to an older generation.
1052 This is necessary from time to time when we end up with an
1053 old-to-new generation pointer in a non-mutable object. We defer
1054 the promotion until the next GC.
1055 -------------------------------------------------------------------------- */
1058 mkMutCons(StgClosure *ptr, generation *gen)
1063 step = &gen->steps[0];
1065 /* chain a new block onto the to-space for the destination step if
1068 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
1072 q = (StgMutVar *)step->hp;
1073 step->hp += sizeofW(StgMutVar);
  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
1077 recordOldToNewPtrs((StgMutClosure *)q);
1079 return (StgClosure *)q;
1082 /* -----------------------------------------------------------------------------
1085 This is called (eventually) for every live object in the system.
1087 The caller to evacuate specifies a desired generation in the
1088 evac_gen global variable. The following conditions apply to
1089 evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.
1105 -------------------------------------------------------------------------- */
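
/* Illustrative sketch (hypothetical helper, not RTS code, not
 * compiled): the table above as a decision on generation numbers only,
 * ignoring steps.  m is the object's generation, n the oldest
 * generation being collected, to the generation of the object's
 * step->to field; the eager-promotion branch corresponds to the
 * adjustment made in copy().
 */
#if 0
static int evac_dest_gen(int m, int n, int to, int evac_gen_, int *failed)
{
  if (m > n) {                         /* too old to move in this GC */
    if (m < evac_gen_) *failed = 1;    /* couldn't reach evac_gen */
    return m;                          /* object stays put */
  }
  if (to < evac_gen_) return evac_gen_;  /* eager promotion */
  return to;                           /* normal case: step->to */
}
#endif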
1109 evacuate(StgClosure *q)
1114 const StgInfoTable *info;
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
1119 if (bd->gen->no > N) {
1120 /* Can't evacuate this object, because it's in a generation
1121 * older than the ones we're collecting. Let's hope that it's
1122 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1124 if (bd->gen->no < evac_gen) {
1126 failed_to_evac = rtsTrue;
1127 TICK_GC_FAILED_PROMOTION();
1131 step = bd->step->to;
1134 else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1137 /* make sure the info pointer is into text space */
1138 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1139 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1142 switch (info -> type) {
1145 return copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);
1148 ASSERT(q->header.info != &MUT_CONS_info);
1150 to = copy(q,sizeW_fromITBL(info),step);
1151 recordMutable((StgMutClosure *)to);
1158 return copy(q,sizeofW(StgHeader)+1,step);
1160 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1165 #ifdef NO_PROMOTE_THUNKS
1166 if (bd->gen->no == 0 &&
1167 bd->step->no != 0 &&
1168 bd->step->no == bd->gen->n_steps-1) {
1172 return copy(q,sizeofW(StgHeader)+2,step);
1180 return copy(q,sizeofW(StgHeader)+2,step);
1186 case IND_OLDGEN_PERM:
1192 return copy(q,sizeW_fromITBL(info),step);
1195 case SE_CAF_BLACKHOLE:
1198 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1201 to = copy(q,BLACKHOLE_sizeW(),step);
1202 recordMutable((StgMutClosure *)to);
1205 case THUNK_SELECTOR:
1207 const StgInfoTable* selectee_info;
1208 StgClosure* selectee = ((StgSelector*)q)->selectee;
1211 selectee_info = get_itbl(selectee);
1212 switch (selectee_info->type) {
1221 StgWord32 offset = info->layout.selector_offset;
	    /* check that the size is in range */
	    ASSERT(offset <
		   (StgWord32)(selectee_info->layout.payload.ptrs +
			       selectee_info->layout.payload.nptrs));
1228 /* perform the selection! */
1229 q = selectee->payload[offset];
1231 /* if we're already in to-space, there's no need to continue
1232 * with the evacuation, just update the source address with
1233 * a pointer to the (evacuated) constructor field.
1235 if (HEAP_ALLOCED(q)) {
1236 bdescr *bd = Bdescr((P_)q);
1237 if (bd->evacuated) {
1238 if (bd->gen->no < evac_gen) {
1239 failed_to_evac = rtsTrue;
1240 TICK_GC_FAILED_PROMOTION();
1246 /* otherwise, carry on and evacuate this constructor field,
1247 * (but not the constructor itself)
1256 case IND_OLDGEN_PERM:
1257 selectee = stgCast(StgInd *,selectee)->indirectee;
1261 selectee = stgCast(StgCAF *,selectee)->value;
1265 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1275 case THUNK_SELECTOR:
1276 /* aargh - do recursively???? */
1279 case SE_CAF_BLACKHOLE:
1283 /* not evaluated yet */
1287 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1288 (int)(selectee_info->type));
1291 return copy(q,THUNK_SELECTOR_sizeW(),step);
1295 /* follow chains of indirections, don't evacuate them */
1296 q = ((StgInd*)q)->indirectee;
1300 if (info->srt_len > 0 && major_gc &&
1301 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1302 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1303 static_objects = (StgClosure *)q;
1308 if (info->srt_len > 0 && major_gc &&
1309 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1310 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1311 static_objects = (StgClosure *)q;
1316 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1317 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1318 static_objects = (StgClosure *)q;
1323 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1324 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1325 static_objects = (StgClosure *)q;
1329 case CONSTR_INTLIKE:
1330 case CONSTR_CHARLIKE:
1331 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
1347 /* shouldn't see these */
1348 barf("evacuate: stack frame\n");
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
1354 return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1357 /* Already evacuated, just return the forwarding address.
1358 * HOWEVER: if the requested destination generation (evac_gen) is
1359 * older than the actual generation (because the object was
1360 * already evacuated to a younger generation) then we have to
1361 * set the failed_to_evac flag to indicate that we couldn't
1362 * manage to promote the object to the desired generation.
1364 if (evac_gen > 0) { /* optimisation */
1365 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1366 if (Bdescr((P_)p)->gen->no < evac_gen) {
1367 /* fprintf(stderr,"evac failed!\n");*/
1368 failed_to_evac = rtsTrue;
1369 TICK_GC_FAILED_PROMOTION();
1372 return ((StgEvacuated*)q)->evacuee;
1376 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1378 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1379 evacuate_large((P_)q, rtsFalse);
1382 /* just copy the block */
1383 return copy(q,size,step);
1388 case MUT_ARR_PTRS_FROZEN:
1390 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1392 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1393 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1396 /* just copy the block */
1397 to = copy(q,size,step);
1398 if (info->type == MUT_ARR_PTRS) {
1399 recordMutable((StgMutClosure *)to);
1407 StgTSO *tso = stgCast(StgTSO *,q);
1408 nat size = tso_sizeW(tso);
1411 /* Large TSOs don't get moved, so no relocation is required.
1413 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1414 evacuate_large((P_)q, rtsTrue);
      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
1421 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1423 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1425 /* relocate the stack pointers... */
1426 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1427 new_tso->sp = (StgPtr)new_tso->sp + diff;
1428 new_tso->splim = (StgPtr)new_tso->splim + diff;
1430 relocate_TSO(tso, new_tso);
1432 recordMutable((StgMutClosure *)new_tso);
1433 return (StgClosure *)new_tso;
1439 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1443 barf("evacuate: strange closure type %d", (int)(info->type));
1449 /* -----------------------------------------------------------------------------
1450 relocate_TSO is called just after a TSO has been copied from src to
1451 dest. It adjusts the update frame list for the new location.
1452 -------------------------------------------------------------------------- */
1455 relocate_TSO(StgTSO *src, StgTSO *dest)
1462 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1466 while ((P_)su < dest->stack + dest->stack_size) {
1467 switch (get_itbl(su)->type) {
1469 /* GCC actually manages to common up these three cases! */
1472 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1477 cf = (StgCatchFrame *)su;
1478 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1483 sf = (StgSeqFrame *)su;
1484 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1493 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1502 scavenge_srt(const StgInfoTable *info)
1504 StgClosure **srt, **srt_end;
1506 /* evacuate the SRT. If srt_len is zero, then there isn't an
1507 * srt field in the info table. That's ok, because we'll
1508 * never dereference it.
1510 srt = stgCast(StgClosure **,info->srt);
1511 srt_end = srt + info->srt_len;
1512 for (; srt < srt_end; srt++) {
1513 /* Special-case to handle references to closures hiding out in DLLs, since
     double indirections are required to get at them.  The code generator knows
1515 which is which when generating the SRT, so it stores the (indirect)
1516 reference to the DLL closure in the table by first adding one to it.
1517 We check for this here, and undo the addition before evacuating it.
1519 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1520 closure that's fixed at link-time, and no extra magic is required.
1522 #ifdef ENABLE_WIN32_DLL_SUPPORT
1523 if ( stgCast(unsigned long,*srt) & 0x1 ) {
1524 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1534 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.
1538 evac_gen is set by the caller to be either zero (for a step in a
1539 generation < N) or G where G is the generation of the step being
1542 We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
1545 -------------------------------------------------------------------------- */
1549 scavenge(step *step)
1552 const StgInfoTable *info;
1554 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1559 failed_to_evac = rtsFalse;
1561 /* scavenge phase - standard breadth-first scavenging of the
1565 while (bd != step->hp_bd || p < step->hp) {
1567 /* If we're at the end of this block, move on to the next block */
1568 if (bd != step->hp_bd && p == bd->free) {
1574 q = p; /* save ptr to object */
1576 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1577 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1579 info = get_itbl((StgClosure *)p);
1580 switch (info -> type) {
1584 StgBCO* bco = stgCast(StgBCO*,p);
1586 for (i = 0; i < bco->n_ptrs; i++) {
1587 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1589 p += bco_sizeW(bco);
1594 /* treat MVars specially, because we don't want to evacuate the
1595 * mut_link field in the middle of the closure.
1598 StgMVar *mvar = ((StgMVar *)p);
1600 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1601 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1602 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1603 p += sizeofW(StgMVar);
1604 evac_gen = saved_evac_gen;
1612 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1613 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1614 p += sizeofW(StgHeader) + 2;
1619 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1620 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1626 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1627 p += sizeofW(StgHeader) + 1;
1632 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1638 p += sizeofW(StgHeader) + 1;
1645 p += sizeofW(StgHeader) + 2;
1652 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1653 p += sizeofW(StgHeader) + 2;
1668 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1669 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1670 (StgClosure *)*p = evacuate((StgClosure *)*p);
1672 p += info->layout.payload.nptrs;
1677 if (step->gen->no != 0) {
1678 SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
1681 case IND_OLDGEN_PERM:
1682 ((StgIndOldGen *)p)->indirectee =
1683 evacuate(((StgIndOldGen *)p)->indirectee);
1684 if (failed_to_evac) {
1685 failed_to_evac = rtsFalse;
1686 recordOldToNewPtrs((StgMutClosure *)p);
1688 p += sizeofW(StgIndOldGen);
1693 StgCAF *caf = (StgCAF *)p;
1695 caf->body = evacuate(caf->body);
1696 if (failed_to_evac) {
1697 failed_to_evac = rtsFalse;
1698 recordOldToNewPtrs((StgMutClosure *)p);
1700 caf->mut_link = NULL;
1702 p += sizeofW(StgCAF);
1708 StgCAF *caf = (StgCAF *)p;
1710 caf->body = evacuate(caf->body);
1711 caf->value = evacuate(caf->value);
1712 if (failed_to_evac) {
1713 failed_to_evac = rtsFalse;
1714 recordOldToNewPtrs((StgMutClosure *)p);
1716 caf->mut_link = NULL;
1718 p += sizeofW(StgCAF);
1723 /* ignore MUT_CONSs */
1724 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1726 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1727 evac_gen = saved_evac_gen;
1729 p += sizeofW(StgMutVar);
1733 case SE_CAF_BLACKHOLE:
1736 p += BLACKHOLE_sizeW();
1741 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1742 (StgClosure *)bh->blocking_queue =
1743 evacuate((StgClosure *)bh->blocking_queue);
1744 if (failed_to_evac) {
1745 failed_to_evac = rtsFalse;
1746 recordMutable((StgMutClosure *)bh);
1748 p += BLACKHOLE_sizeW();
1752 case THUNK_SELECTOR:
1754 StgSelector *s = (StgSelector *)p;
1755 s->selectee = evacuate(s->selectee);
1756 p += THUNK_SELECTOR_sizeW();
1762 barf("scavenge:IND???\n");
1764 case CONSTR_INTLIKE:
1765 case CONSTR_CHARLIKE:
1767 case CONSTR_NOCAF_STATIC:
1771 /* Shouldn't see a static object here. */
1772 barf("scavenge: STATIC object\n");
1784 /* Shouldn't see stack frames here. */
1785 barf("scavenge: stack frame\n");
1787 case AP_UPD: /* same as PAPs */
1789 /* Treat a PAP just like a section of stack, not forgetting to
1790 * evacuate the function pointer too...
1793 StgPAP* pap = stgCast(StgPAP*,p);
1795 pap->fun = evacuate(pap->fun);
1796 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1797 p += pap_sizeW(pap);
1802 /* nothing to follow */
1803 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1807 /* follow everything */
1811 evac_gen = 0; /* repeatedly mutable */
1812 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1813 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1814 (StgClosure *)*p = evacuate((StgClosure *)*p);
1816 evac_gen = saved_evac_gen;
1820 case MUT_ARR_PTRS_FROZEN:
1821 /* follow everything */
1823 StgPtr start = p, next;
1825 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1826 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1827 (StgClosure *)*p = evacuate((StgClosure *)*p);
1829 if (failed_to_evac) {
1830 /* we can do this easier... */
1831 recordMutable((StgMutClosure *)start);
1832 failed_to_evac = rtsFalse;
1843 /* chase the link field for any TSOs on the same queue */
1844 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1845 if ( tso->why_blocked == BlockedOnMVar
1846 || tso->why_blocked == BlockedOnBlackHole) {
1847 tso->block_info.closure = evacuate(tso->block_info.closure);
1849 /* scavenge this thread's stack */
1850 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1851 evac_gen = saved_evac_gen;
1852 p += tso_sizeW(tso);
1859 barf("scavenge: unimplemented/strange closure type\n");
1865 /* If we didn't manage to promote all the objects pointed to by
1866 * the current object, then we have to designate this object as
1867 * mutable (because it contains old-to-new generation pointers).
1869 if (failed_to_evac) {
1870 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1871 failed_to_evac = rtsFalse;
1879 /* -----------------------------------------------------------------------------
1880 Scavenge one object.
1882 This is used for objects that are temporarily marked as mutable
1883 because they contain old-to-new generation pointers. Only certain
1884 objects can have this property.
1885 -------------------------------------------------------------------------- */
1887 scavenge_one(StgClosure *p)
1889 const StgInfoTable *info;
1892 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1893 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1897 switch (info -> type) {
1900 case FUN_1_0: /* hardly worth specialising these guys */
1920 case IND_OLDGEN_PERM:
1925 end = (P_)p->payload + info->layout.payload.ptrs;
1926 for (q = (P_)p->payload; q < end; q++) {
1927 (StgClosure *)*q = evacuate((StgClosure *)*q);
1933 case SE_CAF_BLACKHOLE:
1938 case THUNK_SELECTOR:
1940 StgSelector *s = (StgSelector *)p;
1941 s->selectee = evacuate(s->selectee);
1945 case AP_UPD: /* same as PAPs */
1947 /* Treat a PAP just like a section of stack, not forgetting to
1948 * evacuate the function pointer too...
1951 StgPAP* pap = (StgPAP *)p;
1953 pap->fun = evacuate(pap->fun);
1954 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1959 /* This might happen if for instance a MUT_CONS was pointing to a
1960 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
1967 barf("scavenge_one: strange object");
1970 no_luck = failed_to_evac;
1971 failed_to_evac = rtsFalse;
1976 /* -----------------------------------------------------------------------------
1977 Scavenging mutable lists.
1979 We treat the mutable list of each generation > N (i.e. all the
1980 generations older than the one being collected) as roots. We also
1981 remove non-mutable objects from the mutable list at this point.
1982 -------------------------------------------------------------------------- */
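
/* Illustrative sketch (hypothetical toy types, not RTS code, not
 * compiled): treating an older generation's mutable list as a root set
 * while dropping entries that no longer need watching, in the style of
 * the functions below.  scavenge1 is assumed to follow the pointers
 * inside an entry and decide whether it must stay on the list.
 */
#if 0
typedef struct MutToy_ {
  struct MutToy_ *mut_link;
  int still_mutable;              /* set by scavenge1 if it must stay */
} MutToy;

static MutToy *scan_mut_list(MutToy *list, void (*scavenge1)(MutToy *))
{
  MutToy *p, *next, *new_list = NULL;
  for (p = list; p != NULL; p = next) {
    next = p->mut_link;
    scavenge1(p);                 /* follow the pointers inside */
    if (p->still_mutable) {       /* keep only genuinely mutable ones */
      p->mut_link = new_list;
      new_list = p;
    }
  }
  return new_list;
}
#endif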
1985 scavenge_mut_once_list(generation *gen)
1987 const StgInfoTable *info;
1988 StgMutClosure *p, *next, *new_list;
1990 p = gen->mut_once_list;
1991 new_list = END_MUT_LIST;
1995 failed_to_evac = rtsFalse;
1997 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
1999 /* make sure the info pointer is into text space */
2000 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2001 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2004 switch(info->type) {
2007 case IND_OLDGEN_PERM:
2009 /* Try to pull the indirectee into this generation, so we can
2010 * remove the indirection from the mutable list.
2012 ((StgIndOldGen *)p)->indirectee =
2013 evacuate(((StgIndOldGen *)p)->indirectee);
      /* Debugging code to print out the size of the thing we just
       * evacuated.
       */
2020 StgPtr start = gen->steps[0].scan;
2021 bdescr *start_bd = gen->steps[0].scan_bd;
2023 scavenge(&gen->steps[0]);
2024 if (start_bd != gen->steps[0].scan_bd) {
2025 size += (P_)BLOCK_ROUND_UP(start) - start;
2026 start_bd = start_bd->link;
2027 while (start_bd != gen->steps[0].scan_bd) {
2028 size += BLOCK_SIZE_W;
2029 start_bd = start_bd->link;
2031 size += gen->steps[0].scan -
2032 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2034 size = gen->steps[0].scan - start;
2036 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2040 /* failed_to_evac might happen if we've got more than two
2041 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
2045 if (failed_to_evac) {
2046 failed_to_evac = rtsFalse;
2047 p->mut_link = new_list;
2050 /* the mut_link field of an IND_STATIC is overloaded as the
2051 * static link field too (it just so happens that we don't need
2052 * both at the same time), so we need to NULL it out when
2053 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2066 ASSERT(p->header.info == &MUT_CONS_info);
2067 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2068 /* didn't manage to promote everything, so put the
2069 * MUT_CONS back on the list.
2071 p->mut_link = new_list;
2078 StgCAF *caf = (StgCAF *)p;
2079 caf->body = evacuate(caf->body);
2080 caf->value = evacuate(caf->value);
2081 if (failed_to_evac) {
2082 failed_to_evac = rtsFalse;
2083 p->mut_link = new_list;
2093 StgCAF *caf = (StgCAF *)p;
2094 caf->body = evacuate(caf->body);
2095 if (failed_to_evac) {
2096 failed_to_evac = rtsFalse;
2097 p->mut_link = new_list;
2106 /* shouldn't have anything else on the mutables list */
2107 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2111 gen->mut_once_list = new_list;
2116 scavenge_mutable_list(generation *gen)
2118 const StgInfoTable *info;
2119 StgMutClosure *p, *next;
2121 p = gen->saved_mut_list;
2125 failed_to_evac = rtsFalse;
2127 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2129 /* make sure the info pointer is into text space */
2130 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2131 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2134 switch(info->type) {
2136 case MUT_ARR_PTRS_FROZEN:
2137 /* remove this guy from the mutable list, but follow the ptrs
2138 * anyway (and make sure they get promoted to this gen).
2143 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2145 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2146 (StgClosure *)*q = evacuate((StgClosure *)*q);
2150 if (failed_to_evac) {
2151 failed_to_evac = rtsFalse;
2152 p->mut_link = gen->mut_list;
2159 /* follow everything */
2160 p->mut_link = gen->mut_list;
2165 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2166 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2167 (StgClosure *)*q = evacuate((StgClosure *)*q);
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2177 ASSERT(p->header.info != &MUT_CONS_info);
2178 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2179 p->mut_link = gen->mut_list;
2185 StgMVar *mvar = (StgMVar *)p;
2186 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2187 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2188 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2189 p->mut_link = gen->mut_list;
2196 StgTSO *tso = (StgTSO *)p;
2198 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2199 if ( tso->why_blocked == BlockedOnMVar
2200 || tso->why_blocked == BlockedOnBlackHole) {
2201 tso->block_info.closure = evacuate(tso->block_info.closure);
2203 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2205 /* Don't take this TSO off the mutable list - it might still
2206 * point to some younger objects (because we set evac_gen to 0
2209 tso->mut_link = gen->mut_list;
2210 gen->mut_list = (StgMutClosure *)tso;
2216 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2217 (StgClosure *)bh->blocking_queue =
2218 evacuate((StgClosure *)bh->blocking_queue);
2219 p->mut_link = gen->mut_list;
2225 /* shouldn't have anything else on the mutables list */
2226 barf("scavenge_mut_list: strange object? %d", (int)(info->type));
2232 scavenge_static(void)
2234 StgClosure* p = static_objects;
2235 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects */
2239 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
   * list... */
2243 while (p != END_OF_STATIC_LIST) {
2247 /* make sure the info pointer is into text space */
2248 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2249 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2251 /* Take this object *off* the static_objects list,
2252 * and put it on the scavenged_static_objects list.
2254 static_objects = STATIC_LINK(info,p);
2255 STATIC_LINK(info,p) = scavenged_static_objects;
2256 scavenged_static_objects = p;
2258 switch (info -> type) {
2262 StgInd *ind = (StgInd *)p;
2263 ind->indirectee = evacuate(ind->indirectee);
2265 /* might fail to evacuate it, in which case we have to pop it
2266 * back on the mutable list (and take it off the
2267 * scavenged_static list because the static link and mut link
2268 * pointers are one and the same).
2270 if (failed_to_evac) {
2271 failed_to_evac = rtsFalse;
2272 scavenged_static_objects = STATIC_LINK(info,p);
2273 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2274 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2288 next = (P_)p->payload + info->layout.payload.ptrs;
2289 /* evacuate the pointers */
2290 for (q = (P_)p->payload; q < next; q++) {
2291 (StgClosure *)*q = evacuate((StgClosure *)*q);
2297 barf("scavenge_static");
2300 ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
2303 * be more stuff on this list now that we've done some evacuating!
2304 * (static_objects is a global)
2310 /* -----------------------------------------------------------------------------
2311 scavenge_stack walks over a section of stack and evacuates all the
2312 objects pointed to by it. We can use the same code for walking
2313 PAPs, since these are just sections of copied stack.
2314 -------------------------------------------------------------------------- */
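
/* Illustrative sketch (hypothetical helper, not RTS code, not
 * compiled): bitmap-driven scavenging of stack slots, as used by the
 * activation-record cases below.  In this convention a 0 bit marks a
 * pointer slot and a 1 bit marks a non-pointer (tagged) slot.
 */
#if 0
static void walk_bitmap(void **p, unsigned long bitmap,
                        void *(*evac)(void *))
{
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) {
      *p = evac(*p);              /* pointer slot: evacuate it */
    }
    p++;                          /* step over non-pointer slots too */
    bitmap >>= 1;
  }
}
#endif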
2317 scavenge_stack(StgPtr p, StgPtr stack_end)
2320 const StgInfoTable* info;
2324 * Each time around this loop, we are looking at a chunk of stack
2325 * that starts with either a pending argument section or an
2326 * activation record.
2329 while (p < stack_end) {
2332 /* If we've got a tag, skip over that many words on the stack */
2333 if (IS_ARG_TAG((W_)q)) {
2338 /* Is q a pointer to a closure?
2340 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2342 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2343 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
2345 /* otherwise, must be a pointer into the allocation space. */
2348 (StgClosure *)*p = evacuate((StgClosure *)q);
2354 * Otherwise, q must be the info pointer of an activation
2355 * record. All activation records have 'bitmap' style layout
2358 info = get_itbl((StgClosure *)p);
2360 switch (info->type) {
2362 /* Dynamic bitmap: the mask is stored on the stack */
2364 bitmap = ((StgRetDyn *)p)->liveness;
2365 p = (P_)&((StgRetDyn *)p)->payload[0];
2368 /* probably a slow-entry point return address: */
2374 /* Specialised code for update frames, since they're so common.
2375 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2376 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2380 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2382 nat type = get_itbl(frame->updatee)->type;
2384 p += sizeofW(StgUpdateFrame);
2385 if (type == EVACUATED) {
2386 frame->updatee = evacuate(frame->updatee);
2389 bdescr *bd = Bdescr((P_)frame->updatee);
2391 if (bd->gen->no > N) {
2392 if (bd->gen->no < evac_gen) {
2393 failed_to_evac = rtsTrue;
2398 /* Don't promote blackholes */
2400 if (!(step->gen->no == 0 &&
2402 step->no == step->gen->n_steps-1)) {
2409 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2410 sizeofW(StgHeader), step);
2411 frame->updatee = to;
2414 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2415 frame->updatee = to;
2416 recordMutable((StgMutClosure *)to);
2419 /* will never be SE_{,CAF_}BLACKHOLE, since we
2420 don't push an update frame for single-entry thunks. KSW 1999-01. */
2421 barf("scavenge_stack: UPDATE_FRAME updatee");
2426 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2433 bitmap = info->layout.bitmap;
2436 while (bitmap != 0) {
2437 if ((bitmap & 1) == 0) {
2438 (StgClosure *)*p = evacuate((StgClosure *)*p);
2441 bitmap = bitmap >> 1;
2448 /* large bitmap (> 32 entries) */
2453 StgLargeBitmap *large_bitmap;
2456 large_bitmap = info->layout.large_bitmap;
2459 for (i=0; i<large_bitmap->size; i++) {
2460 bitmap = large_bitmap->bitmap[i];
2461 q = p + sizeof(W_) * 8;
2462 while (bitmap != 0) {
2463 if ((bitmap & 1) == 0) {
2464 (StgClosure *)*p = evacuate((StgClosure *)*p);
2467 bitmap = bitmap >> 1;
2469 if (i+1 < large_bitmap->size) {
2471 (StgClosure *)*p = evacuate((StgClosure *)*p);
2477 /* and don't forget to follow the SRT */
2482 barf("scavenge_stack: weird activation record found on stack.\n");
2487 /*-----------------------------------------------------------------------------
2488 scavenge the large object list.
2490 evac_gen set by caller; similar games played with evac_gen as with
2491 scavenge() - see comment at the top of scavenge(). Most large
   objects are (repeatedly) mutable, so most of the time evac_gen will
   be zero.
2494 --------------------------------------------------------------------------- */
2497 scavenge_large(step *step)
2501 const StgInfoTable* info;
2502 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2504 evac_gen = 0; /* most objects are mutable */
2505 bd = step->new_large_objects;
2507 for (; bd != NULL; bd = step->new_large_objects) {
2509 /* take this object *off* the large objects list and put it on
2510 * the scavenged large objects list. This is so that we can
2511 * treat new_large_objects as a stack and push new objects on
2512 * the front when evacuating.
2514 step->new_large_objects = bd->link;
2515 dbl_link_onto(bd, &step->scavenged_large_objects);
2518 info = get_itbl(stgCast(StgClosure*,p));
2520 switch (info->type) {
2522 /* only certain objects can be "large"... */
2525 /* nothing to follow */
2529 /* follow everything */
2533 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2534 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2535 (StgClosure *)*p = evacuate((StgClosure *)*p);
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }
    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }
    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        if (   tso->why_blocked == BlockedOnMVar
            || tso->why_blocked == BlockedOnBlackHole) {
          tso->block_info.closure = evacuate(tso->block_info.closure);
        }
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
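/* An illustrative sketch (never compiled) of the list discipline used
 * above, with toy singly-linked types: objects are popped off one
 * list and pushed onto another, so anything evacuate() conses onto
 * the front of the pending list is picked up on the next iteration.
 * (The real lists are doubly-linked via dbl_link_onto().)
 */
#if 0
struct blk { struct blk *link; };
static void drain( struct blk **pending, struct blk **done )
{
  struct blk *bd;
  while ((bd = *pending) != NULL) {
    *pending = bd->link;        /* pop: scavenging may push more */
    bd->link = *done;           /* push onto the done list */
    *done = bd;
    /* ... scavenge the object in this block ... */
  }
}
#endif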
static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure *p, *link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
  enteredCAFs = END_CAF_LIST;
}
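/* The 0xdeadbeef scribbles above are a debugging aid: any stray use
 * of a reverted CAF's value or link field should then fail loudly
 * rather than silently.
 */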
void revert_dead_CAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF *next = caf->link, *new;

    new = (StgCAF*)isAlive((StgClosure*)caf);
    if (new) {
      /* still alive: keep it on the list */
      new->link = enteredCAFs;
      enteredCAFs = new;
    } else {
      /* dead: revert it */
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value = (StgClosure*)0xdeadbeef;
      caf->link  = (StgCAF*)0xdeadbeef;
    }
    caf = next;
  }
}
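/* An illustrative sketch (never compiled) of the pattern used by
 * revert_dead_CAFs() above: filtering an intrusive singly-linked list
 * in place.  Toy types only; the hypothetical 'still_alive' stands in
 * for isAlive(), returning the (possibly forwarded) object or NULL.
 * Note that, like the original, this reverses the order of survivors.
 */
#if 0
struct node { struct node *link; };
extern struct node *still_alive(struct node *n);   /* hypothetical */
static struct node *filter_live(struct node *list, struct node *end)
{
  struct node *kept = end, *n = list;
  while (n != end) {
    struct node *next = n->link;
    struct node *fwd  = still_alive(n);            /* NULL if dead */
    if (fwd) {
      fwd->link = kept;
      kept = fwd;
    }
    n = next;
  }
  return kept;
}
#endif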
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG
static void gcCAFs(void)
{
  StgClosure *p = caf_list, **pp = &caf_list;
  const StgInfoTable *info;
  nat i = 0;

  while (p != NULL) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);
    if (STATIC_LINK(info,p) == NULL) {
      /* not reachable this GC: black hole it */
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }
  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }
      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&BLACKHOLE_info);
      }
      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                   /* Temporally next */
  StgUpdateFrame *prev_frame;                   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above.  We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze.  When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
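  /* The "empty frame" test in the loop below compares pointers: the
   * current frame is squeezable when the temporally previous update
   * frame starts exactly where this one ends, once the words already
   * squeezed out (displacement) are discounted - i.e. nothing on the
   * stack separates the two frames.
   */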
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;
#if 0 /* DEBUG */
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;
          updatee_keep = updatee_bypass;
          updatee_bypass = temp;
          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.
       */
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

#if 0 /* DEBUG */
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
#endif

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
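/* An illustrative sketch (never compiled) of the word-sliding loop
 * above, with toy types: each live word between the squeezed-out
 * frames is copied 'displacement' words towards the higher-addressed
 * stack bottom, working from the top word downwards so no word is
 * overwritten before it has been read.
 */
#if 0
static void slide_words( unsigned long *sp, unsigned long *limit,
                         unsigned long displacement )
{
  while (sp >= limit) {
    sp[displacement] = *sp;
    sp--;
  }
}
#endif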
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
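/* Note that exactly one of the two passes runs: threadSqueezeStack()
 * subsumes the lazy black-holing, so the plain black-holing pass is
 * only needed when squeezing has been disabled via the RTS flags.
 */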