/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.64 1999/11/01 18:17:45 sewardj Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
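
/* Illustrative sketch (not part of the collector): how a static object
 * would be consed onto static_objects via its static link field, as
 * described above.  The helper name is an assumption for illustration
 * only; the real pushes happen inline in evacuate() below.
 */
#if 0
static void push_static_example(const StgInfoTable *info, StgClosure *q)
{
  /* a non-NULL link field means "already on a list", which breaks loops */
  if (STATIC_LINK(info,q) == NULL) {
    STATIC_LINK(info,q) = static_objects;
    static_objects = q;
  }
}
#endif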
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate          ( StgClosure *q );
static void  zero_static_object_list  ( StgClosure* first_static );
static void  zero_mutable_list        ( StgMutClosure *first );
static void  revert_dead_CAFs         ( void );

static rtsBool traverse_weak_ptr_list ( void );
static void  cleanup_weak_ptr_list    ( StgWeak **list );

static void  scavenge_stack           ( StgPtr p, StgPtr stack_end );
static void  scavenge_large           ( step *step );
static void  scavenge                 ( step *step );
static void  scavenge_static          ( void );
static void  scavenge_mutable_list    ( generation *g );
static void  scavenge_mut_once_list   ( generation *g );

static void  gcCAFs                   ( void );
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
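
/* Illustrative sketch (an assumption for clarity, not the collector's
 * code): the destination choice described above, written out as a
 * helper.  The name dest_step_example is invented; the real logic
 * lives inline in copy() and evacuate() below.
 */
#if 0
static step *dest_step_example(bdescr *bd)
{
  /* The "to" pointer set up during to-space initialisation already
   * encodes "next step, or step 0 of the next generation".
   */
  step *stp = bd->step->to;

  /* if the caller asked for promotion to at least evac_gen, override */
  if (stp->gen->no < evac_gen) {
    stp = &generations[evac_gen].steps[0];
  }
  return stp;
}
#endif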
void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int gen, st;
  rtsBool flag;
  CostCentreStack *prev_CCS;

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */

  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }

  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }
  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
    allocated -= (current_nursery->start + BLOCK_SIZE_W)
      - current_nursery->free;
  }
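
  /* Worked example (figures assumed for illustration): with 256
   * nursery blocks of BLOCK_SIZE_W = 1024 words, 10 of them still
   * unused, and allocated_bytes() == 5000 words from allocate(), the
   * estimate is 256*1024 + 5000 - 10*1024 = 256,904 words, minus the
   * unused tail of the current block.
   */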
  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evacuate an object to gen 3
   * and it has already been evacuated to gen 2.
   */

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }
      step = &generations[gen].steps[st];
      evac_gen = gen;
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
        scavenge(step);
        flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
        scavenge_large(step);
        flag = rtsTrue;
      }
    }
  }

  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evacuated something */
    goto loop;
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
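
  /* Worked example (assumed figures): with 100 live blocks in the
   * oldest step, oldGenFactor = 2 and minOldGenSize = 48, max_blocks
   * becomes max(200, 48) = 200.  With maxHeapSize = 300 blocks it is
   * capped at 150; the free-space threshold is pcFreeHeap *
   * maxHeapSize / 200 = 3 * 300 / 200 = 4 blocks (for pcFreeHeap = 3),
   * and since 150 - 100 = 50 >= 4, heapOverflow() is not called.
   */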
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
        }
        for (bd = step->blocks; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;    /* now from-space */
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------
         *                      oldest_gen
         */
        if (g != 0) {
          if (RtsFlags.GcFlags.generations > 1) {
            generations[g].max_blocks = (oldest_gen->max_blocks * g)
              / (RtsFlags.GcFlags.generations-1);
          } else {
            generations[g].max_blocks = oldest_gen->max_blocks;
          }
        }

      } else {
        /* for older generations... */

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
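
    /* Worked example (assumed figures): with 100 blocks of live data,
     * oldGenFactor = 2 and maxHeapSize = 500, the test asks whether
     * 100*2*2 = 400 > 500; it is not, so the nursery is simply resized
     * to 100*2 = 200 blocks.  Had maxHeapSize been 300, we would take
     * adjusted_blocks = 300 - 2*100 = 100, giving pc_free = 33, and
     * only call heapOverflow() if that dropped below pcFreeHeap.
     */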
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();  /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
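
  /* Worked example (assumed figures): with heapSizeSuggestion = 4096
   * blocks, needed = 1024 and g0s0_pcnt_kept = 30, the formula gives
   * blocks = (4096 - 1024) * 100 / 130 = 2363, i.e. a nursery sized so
   * that the retained 30% plus the other generations' needs stay close
   * to the suggested total.
   */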
  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
  }
  current_nursery = g0s0->blocks;

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /*    heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }
    weak_done = rtsTrue;
  }

  return flag;
}
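
/* Illustrative caller pattern (an assumption for clarity, mirroring
 * the scavenging loop in GarbageCollect above, which uses goto): the
 * function is re-run after every scavenging pass until it stops
 * finding newly live keys, at which point the survivors' finalizers
 * have all been evacuated and the rest are dead.
 */
#if 0
    do {
      scavenge_everything_somehow();    /* hypothetical name */
    } while (traverse_weak_ptr_list()); /* rtsTrue => evacuated more */
#endif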
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

 loop:

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  info = get_itbl(p);

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:              /* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  default:
    /* dead. */
    return NULL;
  }
}
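
/* Illustrative use of isAlive (an assumption for clarity, though
 * revert_dead_CAFs below does essentially this): probe a
 * possibly-dead closure and pick up its post-GC address.
 */
#if 0
    StgClosure *q = isAlive(p);
    if (q != NULL) {
      p = q;    /* survived: q is the new address */
    } else {
      /* p was not reachable in this collection */
    }
#endif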
StgClosure *
MarkRoot(StgClosure *root)
{
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;          /* to-space block */
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
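
/* Illustrative use of copyPart (this mirrors the BLACKHOLE cases in
 * evacuate() and scavenge_stack() later in this file): only the header
 * of a BLACKHOLE is worth copying, but its full size must still be
 * reserved in to-space.
 */
#if 0
    to = copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), step);
#endif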
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to
   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        /* nope */
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame\n");

  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
     * tagged with activation records.
     */
    return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        /* fprintf(stderr,"evac failed!\n"); */
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su    = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp    = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }

  default:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
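
/* Worked example (assumed addresses): if the source stack lived at
 * word address 0x1000 and the copy at 0x2000, diff = 0x1000 words, and
 * every frame link on the copied stack is shifted up by exactly that
 * amount, preserving the shape of the update-frame chain at its new
 * location.
 */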
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( stgCast(unsigned long,*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
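
/* Illustrative sketch of the tag-bit convention described above (the
 * masks mirror the ENABLE_WIN32_DLL_SUPPORT branch; the sample values
 * are assumptions): an odd SRT entry such as 0x40301 means "indirect
 * entry at 0x40300", while an even entry is used directly.
 */
#if 0
    unsigned long entry = stgCast(unsigned long, *srt);
    if (entry & 0x1) {
      /* DLL import: strip the tag, then follow one extra indirection */
      evacuate(*stgCast(StgClosure**, entry & ~0x1));
    } else {
      /* link-time-fixed closure: evacuate it directly */
      evacuate(*srt);
    }
#endif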
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info -> type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        }
        caf->mut_link = NULL;
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        }
        caf->mut_link = NULL;
        p += sizeofW(StgCAF);
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        if (   tso->why_blocked == BlockedOnMVar
            || tso->why_blocked == BlockedOnBlackHole) {
          tso->block_info.closure = evacuate(tso->block_info.closure);
        }
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  switch (info -> type) {

  case FUN:
  case FUN_1_0:                 /* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

#if 0
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen->no;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
          gen->mut_list = p;
        }
        continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        if (   tso->why_blocked == BlockedOnMVar
            || tso->why_blocked == BlockedOnBlackHole) {
          tso->block_info.closure = evacuate(tso->block_info.closure);
        }
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_list: strange object? %d", (int)(info->type));
    }
  }
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++;
      continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      p++;
      goto follow_srt;

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *step;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          step = bd->step;
          if (!(step->gen->no == 0 &&
                step->no != 0 &&
                step->no == step->gen->n_steps-1)) {
            step = step->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }

      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgPtr q;
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
2509 scavenge_large(step *step)
2513 const StgInfoTable* info;
2514 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2516 evac_gen = 0; /* most objects are mutable */
2517 bd = step->new_large_objects;
2519 for (; bd != NULL; bd = step->new_large_objects) {
2521 /* take this object *off* the large objects list and put it on
2522 * the scavenged large objects list. This is so that we can
2523 * treat new_large_objects as a stack and push new objects on
2524 * the front when evacuating.
2526 step->new_large_objects = bd->link;
2527 dbl_link_onto(bd, &step->scavenged_large_objects);
2530 info = get_itbl(stgCast(StgClosure*,p));
2532 switch (info->type) {
2534 /* only certain objects can be "large"... */
2537 /* nothing to follow */
2541 /* follow everything */
2545 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2546 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2547 (StgClosure *)*p = evacuate((StgClosure *)*p);
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }
    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }
    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        if (   tso->why_blocked == BlockedOnMVar
            || tso->why_blocked == BlockedOnBlackHole) {
          tso->block_info.closure = evacuate(tso->block_info.closure);
        }

        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
  enteredCAFs = END_CAF_LIST;
}
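/* Added note (not in the original source): 0xdeadbeef is a poison
 * value; any accidental use of a reverted CAF's stale value or link
 * field should now fail fast rather than silently read garbage.
 */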
void revert_dead_CAFs(void)
{
    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {
        StgCAF *next = caf->link;
        StgCAF *new  = (StgCAF*)isAlive((StgClosure*)caf);

        if (new) {
            /* this CAF is still alive: keep it on the list */
            new->link = enteredCAFs;
            enteredCAFs = new;
        } else {
            /* this CAF is dead: revert it */
            SET_INFO(caf,&CAF_UNENTERED_info);
            caf->value = (StgClosure*)0xdeadbeef;
            caf->link  = (StgCAF*)0xdeadbeef;
        }
        caf = next;
    }
}
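/* Added note (not in the original source): isAlive() returns NULL for
 * a dead closure and otherwise the closure's current address, which
 * may differ from the old one if the collector moved it; that is why
 * its result, not the old pointer, is linked back onto enteredCAFs
 * above.
 */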
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
void gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;    /* caf_list: the DEBUG-only list of all CAFs (name assumed) */
  pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      /* dead CAF: black hole it and unlink it from the list */
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadLazyBlackHole");
    }
  }
}
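/* Added note (not in the original source): LAZY_BLACKHOLING is a
 * compile-time option.  When it is disabled, every thunk under
 * evaluation should already have been black-holed eagerly, so the
 * DEBUG fprintf above flags any closure that unexpectedly still needs
 * black-holing here.
 */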
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;                   /* Temporally next */
  StgUpdateFrame *prev_frame;                   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
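  /* Worked example (added note, not in the original source): if frames
   * were pushed in the order STOP, F1, F2 (so tso->su == F2, with
   * F2->link == F1 and F1->link == STOP), then after this walk
   * F1->link == F2 and F2->link == NULL.  Each link now points at the
   * temporally *next* frame, and `frame' is left at the STOP_FRAME end
   * of the stack, ready for the walk back up.
   */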
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {
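      /* Added note (not in the original source): "empty" means there
       * are no words between the two frames: the update frame pushed
       * just before this one begins exactly where this one ends, once
       * the words already squeezed out (displacement) are taken into
       * account.
       */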
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if 0 /* DEBUG */
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif
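      /* Added note (not in the original source): the queue-merging
       * code above is compiled out (#if 0).  In the live path below,
       * UPD_IND() turns the bypassed updatee into an indirection to
       * the one we keep, which also wakes up any threads blocked on
       * it, as its trailing comment says.
       */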
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly in
       * this case.  See the log somewhere.
       */
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_info &&
            bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

#if 0 /* DEBUG */
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
#endif

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
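/* Worked example (added note, not in the original source): squeezing
 * one empty update frame adds sizeofW(StgUpdateFrame) to
 * `displacement', so each remaining word between the squeezed frame
 * and the frame above it is copied that many words towards the stack
 * bottom (sp[displacement] = *sp), and at the end tso->sp moves up by
 * the total number of words reclaimed.
 */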
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
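/* Added note (not in the original source): squeezeUpdFrames is an RTS
 * option (if memory serves, the -Z RTS flag turns squeezing off).
 * When squeezing is enabled, the black-holing is done as part of
 * threadSqueezeStack(), hence the "does black holing too" comment
 * above.
 */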