1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.67 1999/11/18 16:02:21 sewardj Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
14 #include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
20 #include "BlockAlloc.h"
25 #include "StablePriv.h"
29 /* STATIC OBJECT LIST.
32 * We maintain a linked list of static objects that are still live.
33 * The requirements for this list are:
35 * - we need to scan the list while adding to it, in order to
36 * scavenge all the static objects (in the same way that
37 * breadth-first scavenging works for dynamic objects).
39 * - we need to be able to tell whether an object is already on
40 * the list, to break loops.
42 * Each static object has a "static link field", which we use for
43 * linking objects on to the list. We use a stack-type list, consing
44 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
48 * A separate list is kept for objects that have been scavenged
49 * already - this is so that we can zero all the marks afterwards.
51 * An object is on the list if its static link field is non-zero; this
52 * means that we have to mark the end of the list with '1', not NULL.
54 * Extra notes for generational GC:
56 * Each generation has a static object list associated with it. When
57 * collecting generations up to N, we treat the static object lists
58 * from generations > N as roots.
60 * We build up a static object list while collecting generations 0..N,
61 * which is then appended to the static object list of generation N+1.
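 *
 * As an illustration (a sketch of the pattern used in evacuate()
 * below, not new code): a live static thunk q is consed onto the
 * front of the list via its static link field,
 *
 *     if (THUNK_STATIC_LINK(q) == NULL) {
 *         THUNK_STATIC_LINK(q) = static_objects;
 *         static_objects = q;
 *     }
 *
 * and scavenge_static() repeatedly pops the head of static_objects,
 * moves it onto scavenged_static_objects, and scavenges it.
 */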
63 StgClosure* static_objects; /* live static objects */
64 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
66 /* N is the oldest generation being collected, where the generations
67 * are numbered starting at 0. A major GC (indicated by the major_gc
68 * flag) is when we're collecting all generations. We only attempt to
69 * deal with static objects and GC CAFs when doing a major GC.
72 static rtsBool major_gc;
74 /* Youngest generation that objects should be evacuated to in
75 * evacuate(). (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
82 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
83 static rtsBool weak_done; /* all done for this pass */
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
88 static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;
94 /* Data used for allocation area sizing.
96 lnat new_blocks; /* blocks allocated during this GC */
97 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
99 /* -----------------------------------------------------------------------------
100 Static function declarations
101 -------------------------------------------------------------------------- */
103 static StgClosure * evacuate ( StgClosure *q );
104 static void zero_static_object_list ( StgClosure* first_static );
105 static void zero_mutable_list ( StgMutClosure *first );
106 static void revert_dead_CAFs ( void );
108 static rtsBool traverse_weak_ptr_list ( void );
109 static void cleanup_weak_ptr_list ( StgWeak **list );
111 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
112 static void scavenge_large ( step *step );
113 static void scavenge ( step *step );
114 static void scavenge_static ( void );
115 static void scavenge_mutable_list ( generation *g );
116 static void scavenge_mut_once_list ( generation *g );
119 static void gcCAFs ( void );
122 /* -----------------------------------------------------------------------------
125 For garbage collecting generation N (and all younger generations):
   - follow all pointers in the root set.  The root set includes all
     mutable objects in all steps in all generations.
130 - for each pointer, evacuate the object it points to into either
131 + to-space in the next higher step in that generation, if one exists,
132 + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
135 + if the object's generation < N, then evacuate it to to-space
136 in the next generation.
138 - repeatedly scavenge to-space from each step in each generation
139 being collected until no more objects can be evacuated.
141 - free from-space in each step, and set from-space = to-space.
143 -------------------------------------------------------------------------- */
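/* In outline (a condensed sketch of GarbageCollect below, not the
 * code itself):
 *
 *      evacuate all the roots;
 *   loop:
 *      scavenge to-space and the mutable lists of generations > N;
 *      if (traverse_weak_ptr_list()) goto loop;
 *      free from-space in the collected steps; to-space becomes
 *      the new from-space.
 */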
145 void GarbageCollect(void (*get_roots)(void))
149 lnat live, allocated, collected = 0, copied = 0;
153 CostCentreStack *prev_CCS;
156 /* tell the stats department that we've started a GC */
159 /* attribute any costs to CCS_GC */
165 /* Approximate how much we allocated */
166 allocated = calcAllocated();
  /* Figure out which generation to collect */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
176 major_gc = (N == RtsFlags.GcFlags.generations-1);
178 /* check stack sanity *before* GC (ToDo: check all threads) */
179 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
180 IF_DEBUG(sanity, checkFreeListSanity());
182 /* Initialise the static object lists
184 static_objects = END_OF_STATIC_LIST;
185 scavenged_static_objects = END_OF_STATIC_LIST;
187 /* zero the mutable list for the oldest generation (see comment by
188 * zero_mutable_list below).
191 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
194 /* Save the old to-space if we're doing a two-space collection
196 if (RtsFlags.GcFlags.generations == 1) {
197 old_to_space = g0s0->to_space;
198 g0s0->to_space = NULL;
201 /* Keep a count of how many new blocks we allocated during this GC
202 * (used for resizing the allocation area, later).
206 /* Initialise to-space in all the generations/steps that we're
209 for (g = 0; g <= N; g++) {
210 generations[g].mut_once_list = END_MUT_LIST;
211 generations[g].mut_list = END_MUT_LIST;
213 for (s = 0; s < generations[g].n_steps; s++) {
215 /* generation 0, step 0 doesn't need to-space */
216 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
220 /* Get a free block for to-space. Extra blocks will be chained on
224 step = &generations[g].steps[s];
225 ASSERT(step->gen->no == g);
226 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
227 bd->gen = &generations[g];
230 bd->evacuated = 1; /* it's a to-space block */
231 step->hp = bd->start;
232 step->hpLim = step->hp + BLOCK_SIZE_W;
236 step->scan = bd->start;
238 step->new_large_objects = NULL;
239 step->scavenged_large_objects = NULL;
241 /* mark the large objects as not evacuated yet */
242 for (bd = step->large_objects; bd; bd = bd->link) {
248 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
251 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
252 for (s = 0; s < generations[g].n_steps; s++) {
253 step = &generations[g].steps[s];
254 if (step->hp_bd == NULL) {
256 bd->gen = &generations[g];
259 bd->evacuated = 0; /* *not* a to-space block */
260 step->hp = bd->start;
261 step->hpLim = step->hp + BLOCK_SIZE_W;
267 /* Set the scan pointer for older generations: remember we
268 * still have to scavenge objects that have been promoted. */
269 step->scan = step->hp;
270 step->scan_bd = step->hp_bd;
271 step->to_space = NULL;
273 step->new_large_objects = NULL;
274 step->scavenged_large_objects = NULL;
278 /* -----------------------------------------------------------------------
279 * follow all the roots that we know about:
280 * - mutable lists from each generation > N
281 * we want to *scavenge* these roots, not evacuate them: they're not
282 * going to move in this GC.
283 * Also: do them in reverse generation order. This is because we
284 * often want to promote objects that are pointed to by older
285 * generations early, so we don't have to repeatedly copy them.
286 * Doing the generations in reverse order ensures that we don't end
287 * up in the situation where we want to evac an object to gen 3 and
288 * it has already been evaced to gen 2.
292 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
293 generations[g].saved_mut_list = generations[g].mut_list;
294 generations[g].mut_list = END_MUT_LIST;
297 /* Do the mut-once lists first */
298 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
299 scavenge_mut_once_list(&generations[g]);
301 for (st = generations[g].n_steps-1; st >= 0; st--) {
302 scavenge(&generations[g].steps[st]);
306 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
307 scavenge_mutable_list(&generations[g]);
309 for (st = generations[g].n_steps-1; st >= 0; st--) {
310 scavenge(&generations[g].steps[st]);
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();
320 /* Mark the weak pointer list, and prepare to detect dead weak
323 old_weak_ptr_list = weak_ptr_list;
324 weak_ptr_list = NULL;
325 weak_done = rtsFalse;
327 /* Mark the stable pointer table.
329 markStablePtrTable(major_gc);
333 /* ToDo: To fix the caf leak, we need to make the commented out
334 * parts of this code do something sensible - as described in
337 extern void markHugsObjects(void);
342 /* -------------------------------------------------------------------------
343 * Repeatedly scavenge all the areas we know about until there's no
344 * more scavenging to be done.
351 /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }
356 /* When scavenging the older generations: Objects may have been
357 * evacuated from generations <= N into older generations, and we
358 * need to scavenge these objects. We're going to try to ensure that
359 * any evacuations that occur move the objects into at least the
360 * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */
365 /* scavenge each step in generations 0..maxgen */
369 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
370 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
371 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
374 step = &generations[gen].steps[st];
376 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
381 if (step->new_large_objects != NULL) {
382 scavenge_large(step);
389 if (flag) { goto loop; }
391 /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
400 cleanup_weak_ptr_list(&weak_ptr_list);
402 /* Now see which stable names are still alive.
404 gcStablePtrTable(major_gc);
  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
417 if (RtsFlags.GcFlags.generations > 1) {
419 oldest_gen->max_blocks =
420 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
421 RtsFlags.GcFlags.minOldGenSize);
422 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
423 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
424 if (((int)oldest_gen->max_blocks -
425 (int)oldest_gen->steps[0].to_blocks) <
426 (RtsFlags.GcFlags.pcFreeHeap *
427 RtsFlags.GcFlags.maxHeapSize / 200)) {
434 /* run through all the generations/steps and tidy up
436 copied = new_blocks * BLOCK_SIZE_W;
437 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
440 generations[g].collections++; /* for stats */
443 for (s = 0; s < generations[g].n_steps; s++) {
445 step = &generations[g].steps[s];
447 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
448 /* Tidy the end of the to-space chains */
449 step->hp_bd->free = step->hp;
450 step->hp_bd->link = NULL;
451 /* stats information: how much we copied */
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp_bd->free;
458 /* for generations we collected... */
461 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
463 /* free old memory and shift to-space into from-space for all
464 * the collected steps (except the allocation area). These
	 * freed blocks will probably be quickly recycled.
	 */
467 if (!(g == 0 && s == 0)) {
468 freeChain(step->blocks);
469 step->blocks = step->to_space;
470 step->n_blocks = step->to_blocks;
471 step->to_space = NULL;
473 for (bd = step->blocks; bd != NULL; bd = bd->link) {
474 bd->evacuated = 0; /* now from-space */
478 /* LARGE OBJECTS. The current live large objects are chained on
479 * scavenged_large, having been moved during garbage
480 * collection from large_objects. Any objects left on
481 * large_objects list are therefore dead, so we free them here.
483 for (bd = step->large_objects; bd != NULL; bd = next) {
488 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
491 step->large_objects = step->scavenged_large_objects;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =   oldest_gen_max_blocks * G
	 *                -------------------------
	 *                   (n_generations - 1)
	 */
503 generations[g].max_blocks = (oldest_gen->max_blocks * g)
504 / (RtsFlags.GcFlags.generations-1);
506 generations[g].max_blocks = oldest_gen->max_blocks;
509 /* for older generations... */
512 /* For older generations, we need to append the
513 * scavenged_large_object list (i.e. large objects that have been
514 * promoted during this GC) to the large_object list for that step.
516 for (bd = step->scavenged_large_objects; bd; bd = next) {
519 dbl_link_onto(bd, &step->large_objects);
522 /* add the new blocks we promoted during this GC */
523 step->n_blocks += step->to_blocks;
  /* Guess the amount of live data for stats. */
  live = calcLive();
531 /* Free the small objects allocated via allocate(), since this will
532 * all have been copied into G0S1 now.
534 if (small_alloc_list != NULL) {
535 freeChain(small_alloc_list);
537 small_alloc_list = NULL;
541 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
543 /* Two-space collector:
544 * Free the old to-space, and estimate the amount of live data.
546 if (RtsFlags.GcFlags.generations == 1) {
549 if (old_to_space != NULL) {
550 freeChain(old_to_space);
552 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
553 bd->evacuated = 0; /* now from-space */
556 /* For a two-space collector, we need to resize the nursery. */
558 /* set up a new nursery. Allocate a nursery size based on a
559 * function of the amount of live data (currently a factor of 2,
560 * should be configurable (ToDo)). Use the blocks from the old
561 * nursery if possible, freeing up any left over blocks.
563 * If we get near the maximum heap size, then adjust our nursery
564 * size accordingly. If the nursery is the same size as the live
565 * data (L), then we need 3L bytes. We can reduce the size of the
566 * nursery to bring the required memory down near 2L bytes.
568 * A normal 2-space collector would need 4L bytes to give the same
569 * performance we get from 3L bytes, reducing to the same
570 * performance at 2L bytes.
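   *
   * For instance (illustrative numbers only): with L = 100 blocks of
   * live data, a nursery of L blocks implies a worst case of about
   * 3L = 300 blocks; shrinking the nursery as the heap approaches
   * maxHeapSize brings that back towards 2L = 200 blocks.
   */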
572 blocks = g0s0->to_blocks;
574 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
575 RtsFlags.GcFlags.maxHeapSize ) {
576 int adjusted_blocks; /* signed on purpose */
579 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
580 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
581 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
582 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
585 blocks = adjusted_blocks;
588 blocks *= RtsFlags.GcFlags.oldGenFactor;
589 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
590 blocks = RtsFlags.GcFlags.minAllocAreaSize;
593 resizeNursery(blocks);
596 /* Generational collector:
597 * If the user has given us a suggested heap size, adjust our
598 * allocation area to make best use of the memory available.
601 if (RtsFlags.GcFlags.heapSizeSuggestion) {
603 nat needed = calcNeeded(); /* approx blocks needed at next GC */
605 /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
610 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
613 /* Estimate a size for the allocation area based on the
614 * information available. We might end up going slightly under
615 * or over the suggested heap size, but we should be pretty
     * Formula:            suggested - needed
     *                 ----------------------------
     *                   1 + g0s0_pcnt_kept/100
622 * where 'needed' is the amount of memory needed at the next
623 * collection for collecting all steps except g0s0.
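     *
     * For example (illustrative figures only): with a suggested heap
     * of 1000 blocks, needed = 200 and g0s0_pcnt_kept = 25, we get
     * (1000 - 200) * 100 / 125 = 640 blocks of allocation area.
     */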
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);
629 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
630 blocks = RtsFlags.GcFlags.minAllocAreaSize;
633 resizeNursery((nat)blocks);
637 /* mark the garbage collected CAFs as dead */
639 if (major_gc) { gcCAFs(); }
642 /* zero the scavenged static object list */
644 zero_static_object_list(scavenged_static_objects);
651 /* start any pending finalizers */
652 scheduleFinalizers(old_weak_ptr_list);
654 /* check sanity after GC */
655 IF_DEBUG(sanity, checkSanity(N));
657 /* extra GC trace info */
658 IF_DEBUG(gc, stat_describe_gens());
661 /* symbol-table based profiling */
662 /* heapCensus(to_space); */ /* ToDo */
665 /* restore enclosing cost centre */
671 /* check for memory leaks if sanity checking is on */
672 IF_DEBUG(sanity, memInventory());
674 /* ok, GC over: tell the stats department what happened. */
675 stat_endGC(allocated, collected, live, copied, N);
678 /* -----------------------------------------------------------------------------
681 traverse_weak_ptr_list is called possibly many times during garbage
682 collection. It returns a flag indicating whether it did any work
683 (i.e. called evacuate on any live pointers).
685 Invariant: traverse_weak_ptr_list is called when the heap is in an
686 idempotent state. That means that there are no pending
687 evacuate/scavenge operations. This invariant helps the weak
688 pointer code decide which weak pointers are dead - if there are no
689 new live weak pointers, then all the currently unreachable ones are
692 For generational GC: we just don't try to finalize weak pointers in
693 older generations than the one we're collecting. This could
694 probably be optimised by keeping per-generation lists of weak
695 pointers, but for a few weak pointers this scheme will work.
696 -------------------------------------------------------------------------- */
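/* A sketch of one pass (not the literal code): for each w on
 * old_weak_ptr_list,
 *
 *      if (isAlive(w->key)) {
 *          w->value     = evacuate(w->value);
 *          w->finalizer = evacuate(w->finalizer);
 *          move w from old_weak_ptr_list onto weak_ptr_list;
 *      }
 *
 * returning rtsTrue iff any key turned out to be alive this pass.
 */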
static rtsBool
traverse_weak_ptr_list(void)
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
703 rtsBool flag = rtsFalse;
705 if (weak_done) { return rtsFalse; }
707 /* doesn't matter where we evacuate values/finalizers to, since
708 * these pointers are treated as roots (iff the keys are alive).
712 last_w = &old_weak_ptr_list;
713 for (w = old_weak_ptr_list; w; w = next_w) {
715 /* First, this weak pointer might have been evacuated. If so,
716 * remove the forwarding pointer from the weak_ptr_list.
718 if (get_itbl(w)->type == EVACUATED) {
719 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
723 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
724 * called on a live weak pointer object. Just remove it.
726 if (w->header.info == &DEAD_WEAK_info) {
727 next_w = ((StgDeadWeak *)w)->link;
732 ASSERT(get_itbl(w)->type == WEAK);
734 /* Now, check whether the key is reachable.
736 if ((new = isAlive(w->key))) {
738 /* evacuate the value and finalizer */
739 w->value = evacuate(w->value);
740 w->finalizer = evacuate(w->finalizer);
741 /* remove this weak ptr from the old_weak_ptr list */
743 /* and put it on the new weak ptr list */
745 w->link = weak_ptr_list;
748 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
758 /* If we didn't make any changes, then we can go round and kill all
759 * the dead weak pointers. The old_weak_ptr list is used as a list
760 * of pending finalizers later on.
762 if (flag == rtsFalse) {
763 cleanup_weak_ptr_list(&old_weak_ptr_list);
764 for (w = old_weak_ptr_list; w; w = w->link) {
765 w->finalizer = evacuate(w->finalizer);
773 /* -----------------------------------------------------------------------------
774 After GC, the live weak pointer list may have forwarding pointers
775 on it, because a weak pointer object was evacuated after being
776 moved to the live weak pointer list. We remove those forwarding
779 Also, we don't consider weak pointer objects to be reachable, but
780 we must nevertheless consider them to be "live" and retain them.
781 Therefore any weak pointer objects which haven't as yet been
782 evacuated need to be evacuated now.
783 -------------------------------------------------------------------------- */
static void
cleanup_weak_ptr_list ( StgWeak **list )
788 StgWeak *w, **last_w;
791 for (w = *list; w; w = w->link) {
793 if (get_itbl(w)->type == EVACUATED) {
794 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
798 if (Bdescr((P_)w)->evacuated == 0) {
799 (StgClosure *)w = evacuate((StgClosure *)w);
806 /* -----------------------------------------------------------------------------
807 isAlive determines whether the given closure is still alive (after
808 a garbage collection) or not. It returns the new address of the
809 closure if it is alive, or NULL otherwise.
810 -------------------------------------------------------------------------- */
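/* For example (a sketch; compare revert_dead_CAFs below):
 *
 *      StgClosure *q = isAlive(p);
 *      if (q == NULL) {
 *          ... p is dead ...
 *      } else {
 *          ... p is live; q is its new address, with indirections
 *          and forwarding pointers already followed ...
 *      }
 */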
StgClosure *
isAlive(StgClosure *p)
815 const StgInfoTable *info;
821 /* ToDo: for static closures, check the static link field.
822 * Problem here is that we sometimes don't set the link field, eg.
823 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
826 /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }
  info = get_itbl(p);

  switch (info->type) {
836 case IND_OLDGEN: /* rely on compatible layout with StgInd */
837 case IND_OLDGEN_PERM:
838 /* follow indirections */
839 p = ((StgInd *)p)->indirectee;
844 return ((StgEvacuated *)p)->evacuee;
StgClosure *
MarkRoot(StgClosure *root)
856 return evacuate(root);
859 static void addBlock(step *step)
861 bdescr *bd = allocBlock();
865 if (step->gen->no <= N) {
871 step->hp_bd->free = step->hp;
872 step->hp_bd->link = bd;
873 step->hp = bd->start;
874 step->hpLim = step->hp + BLOCK_SIZE_W;
880 static __inline__ void
881 upd_evacuee(StgClosure *p, StgClosure *dest)
883 p->header.info = &EVACUATED_info;
884 ((StgEvacuated *)p)->evacuee = dest;
887 static __inline__ StgClosure *
888 copy(StgClosure *src, nat size, step *step)
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
893 /* Find out where we're going, using the handy "to" pointer in
894 * the step of the source object. If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
898 if (step->gen->no < evac_gen) {
899 #ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
909 if (step->hp + size >= step->hpLim) {
  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
919 upd_evacuee(src,(StgClosure *)dest);
920 return (StgClosure *)dest;
923 /* Special version of copy() for when we only want to copy the info
924 * pointer of an object, but reserve some padding after it. This is
925 * used to optimise evacuation of BLACKHOLEs.
928 static __inline__ StgClosure *
929 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
934 if (step->gen->no < evac_gen) {
935 #ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }
942 if (step->hp + size_to_reserve >= step->hpLim) {
  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
951 step->hp += size_to_reserve;
952 upd_evacuee(src,(StgClosure *)dest);
953 return (StgClosure *)dest;
956 /* -----------------------------------------------------------------------------
957 Evacuate a large object
959 This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
961 new_large_objects list, from where it will be scavenged later.
   Convention: bd->evacuated is non-zero for a large object that has been
   evacuated, or 0 otherwise.
965 -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p, rtsBool mutable)
  bdescr *bd = Bdescr(p);
  step *step;
973 /* should point to the beginning of the block */
974 ASSERT(((W_)p & BLOCK_MASK) == 0);
  /* already evacuated? */
  if (bd->evacuated) {
978 /* Don't forget to set the failed_to_evac flag if we didn't get
979 * the desired destination (see comments in evacuate()).
981 if (bd->gen->no < evac_gen) {
982 failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }
  step = bd->step;

  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }
999 /* link it on to the evacuated large object list of the destination step
1001 step = bd->step->to;
1002 if (step->gen->no < evac_gen) {
1003 #ifdef NO_EAGER_PROMOTION
1004 failed_to_evac = rtsTrue;
1006 step = &generations[evac_gen].steps[0];
1011 bd->gen = step->gen;
1012 bd->link = step->new_large_objects;
1013 step->new_large_objects = bd;
  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
1021 /* -----------------------------------------------------------------------------
1022 Adding a MUT_CONS to an older generation.
1024 This is necessary from time to time when we end up with an
1025 old-to-new generation pointer in a non-mutable object. We defer
1026 the promotion until the next GC.
1027 -------------------------------------------------------------------------- */
StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
1040 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
1044 q = (StgMutVar *)step->hp;
1045 step->hp += sizeofW(StgMutVar);
  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
1049 recordOldToNewPtrs((StgMutClosure *)q);
1051 return (StgClosure *)q;
1054 /* -----------------------------------------------------------------------------
1057 This is called (eventually) for every live object in the system.
1059 The caller to evacuate specifies a desired generation in the
1060 evac_gen global variable. The following conditions apply to
1061 evacuating an object which resides in generation M when we're
   collecting up to generation N

     if  M >= evac_gen
         if  M > N        do nothing
         else             evac to step->to

     if  M <  evac_gen    evac to evac_gen, step 0
   if the object is already evacuated, then we check which generation
   it now resides in.

     if  M >= evac_gen   do nothing
     if  M <  evac_gen   set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.
1077 -------------------------------------------------------------------------- */
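/* A worked example (assuming three generations, one step each):
 * suppose N == 1 and evac_gen == 2, because we're scavenging an
 * object that lives in generation 2.  Evacuating one of its pointees
 * that resides in generation 0 copies it straight into
 * generations[2].steps[0] (the M < evac_gen rule).  If that pointee
 * had already been evacuated to generation 1, it can't be moved
 * again, so we just set failed_to_evac and the caller records the
 * referring object on a mutable list.
 */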
static StgClosure *
evacuate(StgClosure *q)
  StgClosure *to;
  bdescr *bd;
  step *step;
  const StgInfoTable *info;

  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
1091 if (bd->gen->no > N) {
1092 /* Can't evacuate this object, because it's in a generation
1093 * older than the ones we're collecting. Let's hope that it's
1094 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1096 if (bd->gen->no < evac_gen) {
1098 failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
1103 step = bd->step->to;
1106 else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1109 /* make sure the info pointer is into text space */
1110 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1111 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  switch (info -> type) {
1118 nat size = bco_sizeW((StgBCO*)q);
1120 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1121 evacuate_large((P_)q, rtsFalse);
1124 /* just copy the block */
1125 to = copy(q,size,step);
1131 ASSERT(q->header.info != &MUT_CONS_info);
1133 to = copy(q,sizeW_fromITBL(info),step);
1134 recordMutable((StgMutClosure *)to);
1141 return copy(q,sizeofW(StgHeader)+1,step);
1143 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1148 #ifdef NO_PROMOTE_THUNKS
1149 if (bd->gen->no == 0 &&
1150 bd->step->no != 0 &&
1151 bd->step->no == bd->gen->n_steps-1) {
1155 return copy(q,sizeofW(StgHeader)+2,step);
1163 return copy(q,sizeofW(StgHeader)+2,step);
1169 case IND_OLDGEN_PERM:
1175 return copy(q,sizeW_fromITBL(info),step);
1178 case SE_CAF_BLACKHOLE:
1181 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1184 to = copy(q,BLACKHOLE_sizeW(),step);
1185 recordMutable((StgMutClosure *)to);
1188 case THUNK_SELECTOR:
1190 const StgInfoTable* selectee_info;
1191 StgClosure* selectee = ((StgSelector*)q)->selectee;
1194 selectee_info = get_itbl(selectee);
1195 switch (selectee_info->type) {
1204 StgWord32 offset = info->layout.selector_offset;
1206 /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));
1211 /* perform the selection! */
1212 q = selectee->payload[offset];
1214 /* if we're already in to-space, there's no need to continue
1215 * with the evacuation, just update the source address with
1216 * a pointer to the (evacuated) constructor field.
1218 if (HEAP_ALLOCED(q)) {
1219 bdescr *bd = Bdescr((P_)q);
1220 if (bd->evacuated) {
1221 if (bd->gen->no < evac_gen) {
1222 failed_to_evac = rtsTrue;
1223 TICK_GC_FAILED_PROMOTION();
1229 /* otherwise, carry on and evacuate this constructor field,
1230 * (but not the constructor itself)
1239 case IND_OLDGEN_PERM:
1240 selectee = stgCast(StgInd *,selectee)->indirectee;
1244 selectee = stgCast(StgCAF *,selectee)->value;
1248 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1258 case THUNK_SELECTOR:
1259 /* aargh - do recursively???? */
1262 case SE_CAF_BLACKHOLE:
1266 /* not evaluated yet */
1270 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1271 (int)(selectee_info->type));
1274 return copy(q,THUNK_SELECTOR_sizeW(),step);
1278 /* follow chains of indirections, don't evacuate them */
1279 q = ((StgInd*)q)->indirectee;
1283 if (info->srt_len > 0 && major_gc &&
1284 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1285 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1286 static_objects = (StgClosure *)q;
1291 if (info->srt_len > 0 && major_gc &&
1292 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1293 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1294 static_objects = (StgClosure *)q;
1299 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1300 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1301 static_objects = (StgClosure *)q;
1306 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1307 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1308 static_objects = (StgClosure *)q;
1312 case CONSTR_INTLIKE:
1313 case CONSTR_CHARLIKE:
1314 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
1330 /* shouldn't see these */
1331 barf("evacuate: stack frame\n");
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
1337 return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1340 /* Already evacuated, just return the forwarding address.
1341 * HOWEVER: if the requested destination generation (evac_gen) is
1342 * older than the actual generation (because the object was
1343 * already evacuated to a younger generation) then we have to
1344 * set the failed_to_evac flag to indicate that we couldn't
1345 * manage to promote the object to the desired generation.
1347 if (evac_gen > 0) { /* optimisation */
1348 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1349 if (Bdescr((P_)p)->gen->no < evac_gen) {
1350 /* fprintf(stderr,"evac failed!\n");*/
1351 failed_to_evac = rtsTrue;
1352 TICK_GC_FAILED_PROMOTION();
1355 return ((StgEvacuated*)q)->evacuee;
1359 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1361 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1362 evacuate_large((P_)q, rtsFalse);
1365 /* just copy the block */
1366 return copy(q,size,step);
1371 case MUT_ARR_PTRS_FROZEN:
1373 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1375 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1376 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1379 /* just copy the block */
1380 to = copy(q,size,step);
1381 if (info->type == MUT_ARR_PTRS) {
1382 recordMutable((StgMutClosure *)to);
1390 StgTSO *tso = stgCast(StgTSO *,q);
1391 nat size = tso_sizeW(tso);
1394 /* Large TSOs don't get moved, so no relocation is required.
1396 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1397 evacuate_large((P_)q, rtsTrue);
1400 /* To evacuate a small TSO, we need to relocate the update frame
1404 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1406 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1408 /* relocate the stack pointers... */
1409 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1410 new_tso->sp = (StgPtr)new_tso->sp + diff;
1411 new_tso->splim = (StgPtr)new_tso->splim + diff;
1413 relocate_TSO(tso, new_tso);
1415 recordMutable((StgMutClosure *)new_tso);
1416 return (StgClosure *)new_tso;
1422 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1426 barf("evacuate: strange closure type %d", (int)(info->type));
1432 /* -----------------------------------------------------------------------------
1433 relocate_TSO is called just after a TSO has been copied from src to
1434 dest. It adjusts the update frame list for the new location.
1435 -------------------------------------------------------------------------- */
1438 relocate_TSO(StgTSO *src, StgTSO *dest)
1445 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1449 while ((P_)su < dest->stack + dest->stack_size) {
1450 switch (get_itbl(su)->type) {
1452 /* GCC actually manages to common up these three cases! */
1455 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1460 cf = (StgCatchFrame *)su;
1461 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1466 sf = (StgSeqFrame *)su;
1467 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1476 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
static void
scavenge_srt(const StgInfoTable *info)
1487 StgClosure **srt, **srt_end;
1489 /* evacuate the SRT. If srt_len is zero, then there isn't an
1490 * srt field in the info table. That's ok, because we'll
1491 * never dereference it.
1493 srt = stgCast(StgClosure **,info->srt);
1494 srt_end = srt + info->srt_len;
1495 for (; srt < srt_end; srt++) {
1496 /* Special-case to handle references to closures hiding out in DLLs, since
     double indirections are required to get at those.  The code generator knows
1498 which is which when generating the SRT, so it stores the (indirect)
1499 reference to the DLL closure in the table by first adding one to it.
1500 We check for this here, and undo the addition before evacuating it.
1502 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1503 closure that's fixed at link-time, and no extra magic is required.
1505 #ifdef ENABLE_WIN32_DLL_SUPPORT
1506 if ( stgCast(unsigned long,*srt) & 0x1 ) {
1507 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1517 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
  StgPtr p, q;
  bdescr *bd;
  const StgInfoTable *info;
1537 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1542 failed_to_evac = rtsFalse;
  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects.
   */
  p = step->scan;
  bd = step->scan_bd;

  while (bd != step->hp_bd || p < step->hp) {
1550 /* If we're at the end of this block, move on to the next block */
1551 if (bd != step->hp_bd && p == bd->free) {
1557 q = p; /* save ptr to object */
1559 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1560 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1562 info = get_itbl((StgClosure *)p);
1563 switch (info -> type) {
1567 StgBCO* bco = stgCast(StgBCO*,p);
1569 for (i = 0; i < bco->n_ptrs; i++) {
1570 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1572 p += bco_sizeW(bco);
1577 /* treat MVars specially, because we don't want to evacuate the
1578 * mut_link field in the middle of the closure.
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
1583 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1584 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1585 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1586 p += sizeofW(StgMVar);
1587 evac_gen = saved_evac_gen;
1595 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1596 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1597 p += sizeofW(StgHeader) + 2;
1602 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1603 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1609 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1610 p += sizeofW(StgHeader) + 1;
1615 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1621 p += sizeofW(StgHeader) + 1;
1628 p += sizeofW(StgHeader) + 2;
1635 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1636 p += sizeofW(StgHeader) + 2;
1651 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1652 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1653 (StgClosure *)*p = evacuate((StgClosure *)*p);
1655 p += info->layout.payload.nptrs;
1660 if (step->gen->no != 0) {
1661 SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
1664 case IND_OLDGEN_PERM:
1665 ((StgIndOldGen *)p)->indirectee =
1666 evacuate(((StgIndOldGen *)p)->indirectee);
1667 if (failed_to_evac) {
1668 failed_to_evac = rtsFalse;
1669 recordOldToNewPtrs((StgMutClosure *)p);
1671 p += sizeofW(StgIndOldGen);
1676 StgCAF *caf = (StgCAF *)p;
1678 caf->body = evacuate(caf->body);
1679 if (failed_to_evac) {
1680 failed_to_evac = rtsFalse;
1681 recordOldToNewPtrs((StgMutClosure *)p);
1683 caf->mut_link = NULL;
1685 p += sizeofW(StgCAF);
1691 StgCAF *caf = (StgCAF *)p;
1693 caf->body = evacuate(caf->body);
1694 caf->value = evacuate(caf->value);
1695 if (failed_to_evac) {
1696 failed_to_evac = rtsFalse;
1697 recordOldToNewPtrs((StgMutClosure *)p);
1699 caf->mut_link = NULL;
1701 p += sizeofW(StgCAF);
1706 /* ignore MUT_CONSs */
1707 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1710 evac_gen = saved_evac_gen;
1712 p += sizeofW(StgMutVar);
1716 case SE_CAF_BLACKHOLE:
1719 p += BLACKHOLE_sizeW();
1724 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1725 (StgClosure *)bh->blocking_queue =
1726 evacuate((StgClosure *)bh->blocking_queue);
1727 if (failed_to_evac) {
1728 failed_to_evac = rtsFalse;
1729 recordMutable((StgMutClosure *)bh);
1731 p += BLACKHOLE_sizeW();
1735 case THUNK_SELECTOR:
1737 StgSelector *s = (StgSelector *)p;
1738 s->selectee = evacuate(s->selectee);
1739 p += THUNK_SELECTOR_sizeW();
1745 barf("scavenge:IND???\n");
1747 case CONSTR_INTLIKE:
1748 case CONSTR_CHARLIKE:
1750 case CONSTR_NOCAF_STATIC:
1754 /* Shouldn't see a static object here. */
1755 barf("scavenge: STATIC object\n");
1767 /* Shouldn't see stack frames here. */
1768 barf("scavenge: stack frame\n");
1770 case AP_UPD: /* same as PAPs */
1772 /* Treat a PAP just like a section of stack, not forgetting to
1773 * evacuate the function pointer too...
1776 StgPAP* pap = stgCast(StgPAP*,p);
1778 pap->fun = evacuate(pap->fun);
1779 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1780 p += pap_sizeW(pap);
1785 /* nothing to follow */
1786 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1790 /* follow everything */
1794 evac_gen = 0; /* repeatedly mutable */
1795 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1796 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1797 (StgClosure *)*p = evacuate((StgClosure *)*p);
1799 evac_gen = saved_evac_gen;
1803 case MUT_ARR_PTRS_FROZEN:
1804 /* follow everything */
1806 StgPtr start = p, next;
1808 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1809 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1810 (StgClosure *)*p = evacuate((StgClosure *)*p);
1812 if (failed_to_evac) {
1813 /* we can do this easier... */
1814 recordMutable((StgMutClosure *)start);
1815 failed_to_evac = rtsFalse;
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	/* chase the link field for any TSOs on the same queue */
1827 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1828 if ( tso->why_blocked == BlockedOnMVar
1829 || tso->why_blocked == BlockedOnBlackHole) {
1830 tso->block_info.closure = evacuate(tso->block_info.closure);
1832 /* scavenge this thread's stack */
1833 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1834 evac_gen = saved_evac_gen;
1835 p += tso_sizeW(tso);
1842 barf("scavenge: unimplemented/strange closure type\n");
1848 /* If we didn't manage to promote all the objects pointed to by
1849 * the current object, then we have to designate this object as
1850 * mutable (because it contains old-to-new generation pointers).
1852 if (failed_to_evac) {
1853 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1854 failed_to_evac = rtsFalse;
1862 /* -----------------------------------------------------------------------------
1863 Scavenge one object.
1865 This is used for objects that are temporarily marked as mutable
1866 because they contain old-to-new generation pointers. Only certain
1867 objects can have this property.
1868 -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgClosure *p)
1872 const StgInfoTable *info;
1875 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1876 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1880 switch (info -> type) {
1883 case FUN_1_0: /* hardly worth specialising these guys */
1903 case IND_OLDGEN_PERM:
1908 end = (P_)p->payload + info->layout.payload.ptrs;
1909 for (q = (P_)p->payload; q < end; q++) {
1910 (StgClosure *)*q = evacuate((StgClosure *)*q);
1916 case SE_CAF_BLACKHOLE:
1921 case THUNK_SELECTOR:
1923 StgSelector *s = (StgSelector *)p;
1924 s->selectee = evacuate(s->selectee);
1928 case AP_UPD: /* same as PAPs */
1930 /* Treat a PAP just like a section of stack, not forgetting to
1931 * evacuate the function pointer too...
1934 StgPAP* pap = (StgPAP *)p;
1936 pap->fun = evacuate(pap->fun);
1937 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1942 /* This might happen if for instance a MUT_CONS was pointing to a
1943 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
1950 barf("scavenge_one: strange object");
  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
1959 /* -----------------------------------------------------------------------------
1960 Scavenging mutable lists.
1962 We treat the mutable list of each generation > N (i.e. all the
1963 generations older than the one being collected) as roots. We also
1964 remove non-mutable objects from the mutable list at this point.
1965 -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
1970 const StgInfoTable *info;
1971 StgMutClosure *p, *next, *new_list;
1973 p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;
1980 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
1982 /* make sure the info pointer is into text space */
1983 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1984 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    info = get_itbl(p);
    switch(info->type) {
1990 case IND_OLDGEN_PERM:
1992 /* Try to pull the indirectee into this generation, so we can
1993 * remove the indirection from the mutable list.
1995 ((StgIndOldGen *)p)->indirectee =
1996 evacuate(((StgIndOldGen *)p)->indirectee);
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
2003 StgPtr start = gen->steps[0].scan;
2004 bdescr *start_bd = gen->steps[0].scan_bd;
2006 scavenge(&gen->steps[0]);
2007 if (start_bd != gen->steps[0].scan_bd) {
2008 size += (P_)BLOCK_ROUND_UP(start) - start;
2009 start_bd = start_bd->link;
2010 while (start_bd != gen->steps[0].scan_bd) {
2011 size += BLOCK_SIZE_W;
2012 start_bd = start_bd->link;
2014 size += gen->steps[0].scan -
2015 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2017 size = gen->steps[0].scan - start;
2019 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2023 /* failed_to_evac might happen if we've got more than two
2024 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
2028 if (failed_to_evac) {
2029 failed_to_evac = rtsFalse;
2030 p->mut_link = new_list;
2033 /* the mut_link field of an IND_STATIC is overloaded as the
2034 * static link field too (it just so happens that we don't need
2035 * both at the same time), so we need to NULL it out when
2036 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      p->mut_link = NULL;
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2049 ASSERT(p->header.info == &MUT_CONS_info);
2050 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2051 /* didn't manage to promote everything, so put the
2052 * MUT_CONS back on the list.
2054 p->mut_link = new_list;
2061 StgCAF *caf = (StgCAF *)p;
2062 caf->body = evacuate(caf->body);
2063 caf->value = evacuate(caf->value);
2064 if (failed_to_evac) {
2065 failed_to_evac = rtsFalse;
2066 p->mut_link = new_list;
2076 StgCAF *caf = (StgCAF *)p;
2077 caf->body = evacuate(caf->body);
2078 if (failed_to_evac) {
2079 failed_to_evac = rtsFalse;
2080 p->mut_link = new_list;
2089 /* shouldn't have anything else on the mutables list */
2090 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2094 gen->mut_once_list = new_list;
static void
scavenge_mutable_list(generation *gen)
2101 const StgInfoTable *info;
2102 StgMutClosure *p, *next;
  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;
2110 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2112 /* make sure the info pointer is into text space */
2113 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2114 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    info = get_itbl(p);
    switch(info->type) {
2119 case MUT_ARR_PTRS_FROZEN:
2120 /* remove this guy from the mutable list, but follow the ptrs
2121 * anyway (and make sure they get promoted to this gen).
2126 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2128 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2129 (StgClosure *)*q = evacuate((StgClosure *)*q);
2133 if (failed_to_evac) {
2134 failed_to_evac = rtsFalse;
2135 p->mut_link = gen->mut_list;
2142 /* follow everything */
2143 p->mut_link = gen->mut_list;
2148 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2149 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2150 (StgClosure *)*q = evacuate((StgClosure *)*q);
2156 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
2160 ASSERT(p->header.info != &MUT_CONS_info);
2161 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2162 p->mut_link = gen->mut_list;
2168 StgMVar *mvar = (StgMVar *)p;
2169 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2170 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2171 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2172 p->mut_link = gen->mut_list;
2179 StgTSO *tso = (StgTSO *)p;
2181 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2182 if ( tso->why_blocked == BlockedOnMVar
2183 || tso->why_blocked == BlockedOnBlackHole) {
2184 tso->block_info.closure = evacuate(tso->block_info.closure);
2186 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2188 /* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
2192 tso->mut_link = gen->mut_list;
2193 gen->mut_list = (StgMutClosure *)tso;
2199 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2200 (StgClosure *)bh->blocking_queue =
2201 evacuate((StgClosure *)bh->blocking_queue);
2202 p->mut_link = gen->mut_list;
2207 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2210 case IND_OLDGEN_PERM:
2211 /* Try to pull the indirectee into this generation, so we can
2212 * remove the indirection from the mutable list.
2215 ((StgIndOldGen *)p)->indirectee =
2216 evacuate(((StgIndOldGen *)p)->indirectee);
2219 if (failed_to_evac) {
2220 failed_to_evac = rtsFalse;
2221 p->mut_link = gen->mut_once_list;
2222 gen->mut_once_list = p;
2229 /* shouldn't have anything else on the mutables list */
2230 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
static void
scavenge_static(void)
2238 StgClosure* p = static_objects;
2239 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects.
   */
2243 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
   * list...
   */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
2251 /* make sure the info pointer is into text space */
2252 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2253 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2255 /* Take this object *off* the static_objects list,
2256 * and put it on the scavenged_static_objects list.
2258 static_objects = STATIC_LINK(info,p);
2259 STATIC_LINK(info,p) = scavenged_static_objects;
2260 scavenged_static_objects = p;
2262 switch (info -> type) {
2266 StgInd *ind = (StgInd *)p;
2267 ind->indirectee = evacuate(ind->indirectee);
2269 /* might fail to evacuate it, in which case we have to pop it
2270 * back on the mutable list (and take it off the
2271 * scavenged_static list because the static link and mut link
2272 * pointers are one and the same).
2274 if (failed_to_evac) {
2275 failed_to_evac = rtsFalse;
2276 scavenged_static_objects = STATIC_LINK(info,p);
2277 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2278 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2292 next = (P_)p->payload + info->layout.payload.ptrs;
2293 /* evacuate the pointers */
2294 for (q = (P_)p->payload; q < next; q++) {
2295 (StgClosure *)*q = evacuate((StgClosure *)*q);
2301 barf("scavenge_static");
2304 ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
2314 /* -----------------------------------------------------------------------------
2315 scavenge_stack walks over a section of stack and evacuates all the
2316 objects pointed to by it. We can use the same code for walking
2317 PAPs, since these are just sections of copied stack.
2318 -------------------------------------------------------------------------- */
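/* Note on the bitmap convention, as the loops below imply: in an
 * activation record's liveness bitmap, a 0 bit at a stack slot means
 * "this word is a live pointer, follow it", and a set bit marks a
 * non-pointer word.  All the inner walks therefore look like (a
 * sketch):
 *
 *      while (bitmap != 0) {
 *          if ((bitmap & 1) == 0) {
 *              (StgClosure *)*p = evacuate((StgClosure *)*p);
 *          }
 *          p++;
 *          bitmap = bitmap >> 1;
 *      }
 */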
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
  StgPtr q;
  const StgInfoTable* info;
2328 * Each time around this loop, we are looking at a chunk of stack
2329 * that starts with either a pending argument section or an
2330 * activation record.
  while (p < stack_end) {
    q = *stgCast(StgPtr*,p);
2336 /* If we've got a tag, skip over that many words on the stack */
2337 if (IS_ARG_TAG((W_)q)) {
2342 /* Is q a pointer to a closure?
2344 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2346 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2347 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
2349 /* otherwise, must be a pointer into the allocation space. */
2352 (StgClosure *)*p = evacuate((StgClosure *)q);
2358 * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
2362 info = get_itbl((StgClosure *)p);
2364 switch (info->type) {
2366 /* Dynamic bitmap: the mask is stored on the stack */
2368 bitmap = ((StgRetDyn *)p)->liveness;
2369 p = (P_)&((StgRetDyn *)p)->payload[0];
2372 /* probably a slow-entry point return address: */
2378 /* Specialised code for update frames, since they're so common.
2379 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2380 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2384 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2386 nat type = get_itbl(frame->updatee)->type;
2388 p += sizeofW(StgUpdateFrame);
2389 if (type == EVACUATED) {
2390 frame->updatee = evacuate(frame->updatee);
2393 bdescr *bd = Bdescr((P_)frame->updatee);
2395 if (bd->gen->no > N) {
2396 if (bd->gen->no < evac_gen) {
2397 failed_to_evac = rtsTrue;
2402 /* Don't promote blackholes */
	      if (!(step->gen->no == 0 &&
		    step->no != 0 &&
		    step->no == step->gen->n_steps-1)) {
2413 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2414 sizeofW(StgHeader), step);
2415 frame->updatee = to;
2418 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2419 frame->updatee = to;
2420 recordMutable((StgMutClosure *)to);
2423 /* will never be SE_{,CAF_}BLACKHOLE, since we
2424 don't push an update frame for single-entry thunks. KSW 1999-01. */
2425 barf("scavenge_stack: UPDATE_FRAME updatee");
2430 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2437 bitmap = info->layout.bitmap;
2440 while (bitmap != 0) {
2441 if ((bitmap & 1) == 0) {
2442 (StgClosure *)*p = evacuate((StgClosure *)*p);
2445 bitmap = bitmap >> 1;
2452 /* large bitmap (> 32 entries) */
2457 StgLargeBitmap *large_bitmap;
2460 large_bitmap = info->layout.large_bitmap;
2463 for (i=0; i<large_bitmap->size; i++) {
2464 bitmap = large_bitmap->bitmap[i];
2465 q = p + sizeof(W_) * 8;
2466 while (bitmap != 0) {
2467 if ((bitmap & 1) == 0) {
2468 (StgClosure *)*p = evacuate((StgClosure *)*p);
2471 bitmap = bitmap >> 1;
2473 if (i+1 < large_bitmap->size) {
	      while (p < q) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
		p++;
	      }
2481 /* and don't forget to follow the SRT */
2486 barf("scavenge_stack: weird activation record found on stack.\n");
2491 /*-----------------------------------------------------------------------------
2492 scavenge the large object list.
2494 evac_gen set by caller; similar games played with evac_gen as with
2495 scavenge() - see comment at the top of scavenge(). Most large
   objects are (repeatedly) mutable, so most of the time evac_gen will
   be zero.
2498 --------------------------------------------------------------------------- */
static void
scavenge_large(step *step)
  bdescr *bd;
  StgPtr p, next;
  const StgInfoTable* info;
2506 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2508 evac_gen = 0; /* most objects are mutable */
2509 bd = step->new_large_objects;
2511 for (; bd != NULL; bd = step->new_large_objects) {
2513 /* take this object *off* the large objects list and put it on
2514 * the scavenged large objects list. This is so that we can
2515 * treat new_large_objects as a stack and push new objects on
2516 * the front when evacuating.
2518 step->new_large_objects = bd->link;
2519 dbl_link_onto(bd, &step->scavenged_large_objects);
    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));
2524 switch (info->type) {
2526 /* only certain objects can be "large"... */
2529 /* nothing to follow */
2533 /* follow everything */
2537 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2538 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2539 (StgClosure *)*p = evacuate((StgClosure *)*p);
2544 case MUT_ARR_PTRS_FROZEN:
2545 /* follow everything */
2547 StgPtr start = p, next;
2549 evac_gen = saved_evac_gen; /* not really mutable */
2550 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2551 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2552 (StgClosure *)*p = evacuate((StgClosure *)*p);
2555 if (failed_to_evac) {
2556 recordMutable((StgMutClosure *)start);
2563 StgBCO* bco = stgCast(StgBCO*,p);
2565 evac_gen = saved_evac_gen;
2566 for (i = 0; i < bco->n_ptrs; i++) {
2567 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	StgTSO *tso = (StgTSO *)p;

	/* chase the link field for any TSOs on the same queue */
2579 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2580 if ( tso->why_blocked == BlockedOnMVar
2581 || tso->why_blocked == BlockedOnBlackHole) {
2582 tso->block_info.closure = evacuate(tso->block_info.closure);
2584 /* scavenge this thread's stack */
2585 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2590 barf("scavenge_large: unknown/strange object");

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
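    /* (Our note: the 0xdeadbeef poisoning is a debugging aid -- any
     * later use of a reverted CAF's stale fields faults on a
     * recognisable value rather than silently reading old data.)
     */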
  }
  enteredCAFs = END_CAF_LIST;
}

void revert_dead_CAFs(void)
{
    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {
	StgCAF *next = caf->link;
	StgCAF *new  = (StgCAF*)isAlive((StgClosure*)caf);
	if (new) {
	    new->link = enteredCAFs;
	    enteredCAFs = new;
	} else {
	    SET_INFO(caf,&CAF_UNENTERED_info);
	    caf->value = (StgClosure*)0xdeadbeef;
	    caf->link  = (StgCAF*)0xdeadbeef;
	}
	caf = next;
    }
}
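
/* The loop in revert_dead_CAFs is an instance of a common pattern:
 * destructively rebuild a singly-linked list, keeping only the cells
 * that pass a liveness test.  A minimal sketch of the pattern (our
 * illustration, with a hypothetical `Node' type -- not RTS code):
 */
#if 0
typedef struct Node_ { struct Node_ *link; } Node;

static Node *keep_live( Node *list, int (*live)(Node *) )
{
    Node *kept = NULL;
    while (list != NULL) {
	Node *next = list->link;
	if (live(list)) {
	    list->link = kept;		/* cons onto the kept list */
	    kept = list;
	}
	list = next;
    }
    return kept;			/* note: kept list comes out reversed */
}
#endif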

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
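
/* gcCAFs walks the list with a pointer-to-pointer (pp), which lets it
 * unlink a dead cell without special-casing the list head.  A minimal
 * sketch of that idiom (our illustration, hypothetical `Node' type --
 * not RTS code):
 */
#if 0
typedef struct Node_ { struct Node_ *link; } Node;

static void delete_dead( Node **pp, int (*dead)(Node *) )
{
    while (*pp != NULL) {
	if (dead(*pp)) {
	    *pp = (*pp)->link;		/* unlink: predecessor now skips it */
	} else {
	    pp = &(*pp)->link;		/* advance to the next link field */
	}
    }
}
#endif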

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;		/* Temporally next */
  StgUpdateFrame *prev_frame;		/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
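
  /* The walk above is an in-place pointer reversal: each frame's link
   * is flipped to point at the frame above as we pass.  On a plain
   * singly-linked list the same trick looks like this (our
   * illustration, hypothetical `Node' type -- not RTS code):
   *
   *   Node *reverse_in_place( Node *p ) {
   *     Node *prev = NULL;
   *     while (p != NULL) {
   *       Node *next = p->link;
   *       p->link = prev;
   *       prev = p;
   *       p = next;
   *     }
   *     return prev;
   *   }
   */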

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if 0 /* DEBUG */
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);
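
      /* (Our note: each squeezed-out frame frees sizeofW(StgUpdateFrame)
       * words; displacement accumulates these, so after squeezing two
       * frames every word below them slides up by twice the frame size.
       * The bypassed updatee has just been overwritten with an
       * indirection to the kept one, so both frames now update the
       * same closure.)
       */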

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement));

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
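
    /* (Our note on the slide: the stack grows downwards, so "up" means
     * towards higher addresses.  With displacement = 2, say, the loop
     * copies each live word between next_frame_bottom and sp into the
     * slot two words above it, starting from the highest word so that
     * no source word is overwritten before it has been read.)
     */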
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}