1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.22 1999/01/28 15:04:00 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
23 #include "StablePriv.h"
27 /* STATIC OBJECT LIST.
30 * We maintain a linked list of static objects that are still live.
31 * The requirements for this list are:
33 * - we need to scan the list while adding to it, in order to
34 * scavenge all the static objects (in the same way that
35 * breadth-first scavenging works for dynamic objects).
37 * - we need to be able to tell whether an object is already on
38 * the list, to break loops.
40 * Each static object has a "static link field", which we use for
41 * linking objects on to the list. We use a stack-type list, consing
42 * objects on the front as they are added (this means that the
43 * scavenge phase is depth-first, not breadth-first, but that
46 * A separate list is kept for objects that have been scavenged
47 * already - this is so that we can zero all the marks afterwards.
49 * An object is on the list if its static link field is non-zero; this
50 * means that we have to mark the end of the list with '1', not NULL.
52 * Extra notes for generational GC:
54 * Each generation has a static object list associated with it. When
55 * collecting generations up to N, we treat the static object lists
56 * from generations > N as roots.
58 * We build up a static object list while collecting generations 0..N,
59 * which is then appended to the static object list of generation N+1.
61 StgClosure* static_objects; /* live static objects */
62 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
64 /* N is the oldest generation being collected, where the generations
65 * are numbered starting at 0. A major GC (indicated by the major_gc
66 * flag) is when we're collecting all generations. We only attempt to
67 * deal with static objects and GC CAFs when doing a major GC.
70 static rtsBool major_gc;
72 /* Youngest generation that objects should be evacuated to in
73 * evacuate(). (Logically an argument to evacuate, but it's static
74 * a lot of the time so we optimise it into a global variable).
80 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
81 static rtsBool weak_done; /* all done for this pass */
83 /* Flag indicating failure to evacuate an object to the desired
86 static rtsBool failed_to_evac;
88 /* Old to-space (used for two-space collector only)
92 /* -----------------------------------------------------------------------------
93 Static function declarations
94 -------------------------------------------------------------------------- */
96 static StgClosure *evacuate(StgClosure *q);
97 static void zeroStaticObjectList(StgClosure* first_static);
98 static rtsBool traverse_weak_ptr_list(void);
99 static void zeroMutableList(StgMutClosure *first);
100 static void revertDeadCAFs(void);
102 static void scavenge_stack(StgPtr p, StgPtr stack_end);
103 static void scavenge_large(step *step);
104 static void scavenge(step *step);
105 static void scavenge_static(void);
106 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
109 static void gcCAFs(void);
112 /* -----------------------------------------------------------------------------
115 For garbage collecting generation N (and all younger generations):
117 - follow all pointers in the root set. the root set includes all
118 mutable objects in all steps in all generations.
120 - for each pointer, evacuate the object it points to into either
121 + to-space in the next higher step in that generation, if one exists,
122 + if the object's generation == N, then evacuate it to the next
123 generation if one exists, or else to-space in the current
125 + if the object's generation < N, then evacuate it to to-space
126 in the next generation.
128 - repeatedly scavenge to-space from each step in each generation
129 being collected until no more objects can be evacuated.
131 - free from-space in each step, and set from-space = to-space.
133 -------------------------------------------------------------------------- */
135 void GarbageCollect(void (*get_roots)(void))
139 lnat live, allocated, collected = 0;
143 CostCentreStack *prev_CCS;
146 /* tell the stats department that we've started a GC */
149 /* attribute any costs to CCS_GC */
155 /* We might have been called from Haskell land by _ccall_GC, in
156 * which case we need to call threadPaused() because the scheduler
157 * won't have done it.
159 if (CurrentTSO) { threadPaused(CurrentTSO); }
161 /* Approximate how much we allocated: number of blocks in the
162 * nursery + blocks allocated via allocate() - unused nusery blocks.
163 * This leaves a little slop at the end of each block, and doesn't
164 * take into account large objects (ToDo).
166 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
167 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
168 allocated -= BLOCK_SIZE_W;
171 /* Figure out which generation to collect
174 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
175 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
179 major_gc = (N == RtsFlags.GcFlags.generations-1);
181 /* check stack sanity *before* GC (ToDo: check all threads) */
182 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
183 IF_DEBUG(sanity, checkFreeListSanity());
185 /* Initialise the static object lists
187 static_objects = END_OF_STATIC_LIST;
188 scavenged_static_objects = END_OF_STATIC_LIST;
190 /* zero the mutable list for the oldest generation (see comment by
191 * zeroMutableList below).
194 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
197 /* Save the old to-space if we're doing a two-space collection
199 if (RtsFlags.GcFlags.generations == 1) {
200 old_to_space = g0s0->to_space;
201 g0s0->to_space = NULL;
204 /* Initialise to-space in all the generations/steps that we're
207 for (g = 0; g <= N; g++) {
208 generations[g].mut_list = END_MUT_LIST;
210 for (s = 0; s < generations[g].n_steps; s++) {
212 /* generation 0, step 0 doesn't need to-space */
213 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
217 /* Get a free block for to-space. Extra blocks will be chained on
221 step = &generations[g].steps[s];
222 ASSERT(step->gen->no == g);
223 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
224 bd->gen = &generations[g];
227 bd->evacuated = 1; /* it's a to-space block */
228 step->hp = bd->start;
229 step->hpLim = step->hp + BLOCK_SIZE_W;
232 step->to_blocks = 1; /* ???? */
233 step->scan = bd->start;
235 step->new_large_objects = NULL;
236 step->scavenged_large_objects = NULL;
237 /* mark the large objects as not evacuated yet */
238 for (bd = step->large_objects; bd; bd = bd->link) {
244 /* make sure the older generations have at least one block to
245 * allocate into (this makes things easier for copy(), see below.
247 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
248 for (s = 0; s < generations[g].n_steps; s++) {
249 step = &generations[g].steps[s];
250 if (step->hp_bd == NULL) {
252 bd->gen = &generations[g];
255 bd->evacuated = 0; /* *not* a to-space block */
256 step->hp = bd->start;
257 step->hpLim = step->hp + BLOCK_SIZE_W;
262 /* Set the scan pointer for older generations: remember we
263 * still have to scavenge objects that have been promoted. */
264 step->scan = step->hp;
265 step->scan_bd = step->hp_bd;
266 step->to_space = NULL;
268 step->new_large_objects = NULL;
269 step->scavenged_large_objects = NULL;
273 /* -----------------------------------------------------------------------
274 * follow all the roots that we know about:
275 * - mutable lists from each generation > N
276 * we want to *scavenge* these roots, not evacuate them: they're not
277 * going to move in this GC.
278 * Also: do them in reverse generation order. This is because we
279 * often want to promote objects that are pointed to by older
280 * generations early, so we don't have to repeatedly copy them.
281 * Doing the generations in reverse order ensures that we don't end
282 * up in the situation where we want to evac an object to gen 3 and
283 * it has already been evaced to gen 2.
286 StgMutClosure *tmp, **pp;
287 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
288 generations[g].saved_mut_list = generations[g].mut_list;
289 generations[g].mut_list = END_MUT_LIST;
292 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
293 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
294 pp = &generations[g].mut_list;
295 while (*pp != END_MUT_LIST) {
296 pp = &(*pp)->mut_link;
302 /* follow all the roots that the application knows about.
307 /* And don't forget to mark the TSO if we got here direct from
310 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
313 /* Mark the weak pointer list, and prepare to detect dead weak
317 old_weak_ptr_list = weak_ptr_list;
318 weak_ptr_list = NULL;
319 weak_done = rtsFalse;
321 /* Mark the stable pointer table.
323 markStablePtrTable(major_gc);
327 /* ToDo: To fix the caf leak, we need to make the commented out
328 * parts of this code do something sensible - as described in
331 extern void markHugsObjects(void);
333 /* ToDo: This (undefined) function should contain the scavenge
334 * loop immediately below this block of code - but I'm not sure
335 * enough of the details to do this myself.
337 scavengeEverything();
338 /* revert dead CAFs and update enteredCAFs list */
343 /* This will keep the CAFs and the attached BCOs alive
344 * but the values will have been reverted
346 scavengeEverything();
351 /* -------------------------------------------------------------------------
352 * Repeatedly scavenge all the areas we know about until there's no
353 * more scavenging to be done.
360 /* scavenge static objects */
361 if (major_gc && static_objects != END_OF_STATIC_LIST) {
365 /* When scavenging the older generations: Objects may have been
366 * evacuated from generations <= N into older generations, and we
367 * need to scavenge these objects. We're going to try to ensure that
368 * any evacuations that occur move the objects into at least the
369 * same generation as the object being scavenged, otherwise we
370 * have to create new entries on the mutable list for the older
374 /* scavenge each step in generations 0..maxgen */
377 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
378 for (s = 0; s < generations[gen].n_steps; s++) {
379 step = &generations[gen].steps[s];
381 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
385 if (step->new_large_objects != NULL) {
386 scavenge_large(step);
392 if (flag) { goto loop; }
394 /* must be last... */
395 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
400 /* Now see which stable names are still alive
402 gcStablePtrTable(major_gc);
404 /* Set the maximum blocks for the oldest generation, based on twice
405 * the amount of live data now, adjusted to fit the maximum heap
408 * This is an approximation, since in the worst case we'll need
409 * twice the amount of live data plus whatever space the other
412 if (RtsFlags.GcFlags.generations > 1) {
414 oldest_gen->max_blocks =
415 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
416 RtsFlags.GcFlags.minOldGenSize);
417 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
418 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
419 if (((int)oldest_gen->max_blocks -
420 (int)oldest_gen->steps[0].to_blocks) <
421 (RtsFlags.GcFlags.pcFreeHeap *
422 RtsFlags.GcFlags.maxHeapSize / 200)) {
429 /* run through all the generations/steps and tidy up
431 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
434 generations[g].collections++; /* for stats */
437 for (s = 0; s < generations[g].n_steps; s++) {
439 step = &generations[g].steps[s];
441 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
442 /* Tidy the end of the to-space chains */
443 step->hp_bd->free = step->hp;
444 step->hp_bd->link = NULL;
447 /* for generations we collected... */
450 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
452 /* free old memory and shift to-space into from-space for all
453 * the collected steps (except the allocation area). These
454 * freed blocks will probaby be quickly recycled.
456 if (!(g == 0 && s == 0)) {
457 freeChain(step->blocks);
458 step->blocks = step->to_space;
459 step->n_blocks = step->to_blocks;
460 step->to_space = NULL;
462 for (bd = step->blocks; bd != NULL; bd = bd->link) {
463 bd->evacuated = 0; /* now from-space */
467 /* LARGE OBJECTS. The current live large objects are chained on
468 * scavenged_large, having been moved during garbage
469 * collection from large_objects. Any objects left on
470 * large_objects list are therefore dead, so we free them here.
472 for (bd = step->large_objects; bd != NULL; bd = next) {
477 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
480 step->large_objects = step->scavenged_large_objects;
482 /* Set the maximum blocks for this generation, interpolating
483 * between the maximum size of the oldest and youngest
486 * max_blocks = oldgen_max_blocks * G
487 * ----------------------
491 generations[g].max_blocks = (oldest_gen->max_blocks * g)
492 / (RtsFlags.GcFlags.generations-1);
495 /* for older generations... */
498 /* For older generations, we need to append the
499 * scavenged_large_object list (i.e. large objects that have been
500 * promoted during this GC) to the large_object list for that step.
502 for (bd = step->scavenged_large_objects; bd; bd = next) {
505 dbl_link_onto(bd, &step->large_objects);
508 /* add the new blocks we promoted during this GC */
509 step->n_blocks += step->to_blocks;
514 /* Guess the amount of live data for stats. */
517 /* Two-space collector:
518 * Free the old to-space, and estimate the amount of live data.
520 if (RtsFlags.GcFlags.generations == 1) {
523 if (old_to_space != NULL) {
524 freeChain(old_to_space);
526 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
527 bd->evacuated = 0; /* now from-space */
530 /* For a two-space collector, we need to resize the nursery. */
532 /* set up a new nursery. Allocate a nursery size based on a
533 * function of the amount of live data (currently a factor of 2,
534 * should be configurable (ToDo)). Use the blocks from the old
535 * nursery if possible, freeing up any left over blocks.
537 * If we get near the maximum heap size, then adjust our nursery
538 * size accordingly. If the nursery is the same size as the live
539 * data (L), then we need 3L bytes. We can reduce the size of the
540 * nursery to bring the required memory down near 2L bytes.
542 * A normal 2-space collector would need 4L bytes to give the same
543 * performance we get from 3L bytes, reducing to the same
544 * performance at 2L bytes.
546 blocks = g0s0->n_blocks;
548 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
549 RtsFlags.GcFlags.maxHeapSize ) {
550 int adjusted_blocks; /* signed on purpose */
553 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
554 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
555 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
556 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
559 blocks = adjusted_blocks;
562 blocks *= RtsFlags.GcFlags.oldGenFactor;
563 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
564 blocks = RtsFlags.GcFlags.minAllocAreaSize;
567 resizeNursery(blocks);
570 /* Generational collector:
571 * If the user has given us a suggested heap size, adjust our
572 * allocation area to make best use of the memory available.
575 if (RtsFlags.GcFlags.heapSizeSuggestion) {
577 nat needed = calcNeeded(); /* approx blocks needed at next GC */
579 /* Guess how much will be live in generation 0 step 0 next time.
580 * A good approximation is the amount of data that was live this
581 * time: this assumes (1) that the size of G0S0 will be roughly
582 * the same as last time, and (2) that the promotion rate will be
585 * If we don't know how much was live in G0S0 (because there's no
586 * step 1), then assume 30% (which is usually an overestimate).
588 if (g0->n_steps == 1) {
589 needed += (g0s0->n_blocks * 30) / 100;
591 needed += g0->steps[1].n_blocks;
594 /* Now we have a rough guess at the number of blocks needed for
595 * the next GC, subtract this from the user's suggested heap size
596 * and use the rest for the allocation area.
598 blocks = (int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed;
600 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
601 blocks = RtsFlags.GcFlags.minAllocAreaSize;
604 resizeNursery((nat)blocks);
608 /* revert dead CAFs and update enteredCAFs list */
611 /* mark the garbage collected CAFs as dead */
613 if (major_gc) { gcCAFs(); }
616 /* zero the scavenged static object list */
618 zeroStaticObjectList(scavenged_static_objects);
623 for (bd = g0s0->blocks; bd; bd = bd->link) {
624 bd->free = bd->start;
625 ASSERT(bd->gen == g0);
626 ASSERT(bd->step == g0s0);
628 current_nursery = g0s0->blocks;
630 /* Free the small objects allocated via allocate(), since this will
631 * all have been copied into G0S1 now.
633 if (small_alloc_list != NULL) {
634 freeChain(small_alloc_list);
636 small_alloc_list = NULL;
638 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
640 /* start any pending finalisers */
641 scheduleFinalisers(old_weak_ptr_list);
643 /* check sanity after GC */
644 IF_DEBUG(sanity, checkSanity(N));
646 /* extra GC trace info */
647 IF_DEBUG(gc, stat_describe_gens());
650 /* symbol-table based profiling */
651 /* heapCensus(to_space); */ /* ToDo */
654 /* restore enclosing cost centre */
659 /* check for memory leaks if sanity checking is on */
660 IF_DEBUG(sanity, memInventory());
662 /* ok, GC over: tell the stats department what happened. */
663 stat_endGC(allocated, collected, live, N);
666 /* -----------------------------------------------------------------------------
669 traverse_weak_ptr_list is called possibly many times during garbage
670 collection. It returns a flag indicating whether it did any work
671 (i.e. called evacuate on any live pointers).
673 Invariant: traverse_weak_ptr_list is called when the heap is in an
674 idempotent state. That means that there are no pending
675 evacuate/scavenge operations. This invariant helps the weak
676 pointer code decide which weak pointers are dead - if there are no
677 new live weak pointers, then all the currently unreachable ones are
680 For generational GC: we just don't try to finalise weak pointers in
681 older generations than the one we're collecting. This could
682 probably be optimised by keeping per-generation lists of weak
683 pointers, but for a few weak pointers this scheme will work.
684 -------------------------------------------------------------------------- */
687 traverse_weak_ptr_list(void)
689 StgWeak *w, **last_w, *next_w;
691 rtsBool flag = rtsFalse;
693 if (weak_done) { return rtsFalse; }
695 /* doesn't matter where we evacuate values/finalisers to, since
696 * these pointers are treated as roots (iff the keys are alive).
700 last_w = &old_weak_ptr_list;
701 for (w = old_weak_ptr_list; w; w = next_w) {
703 if ((new = isAlive(w->key))) {
705 /* evacuate the value and finaliser */
706 w->value = evacuate(w->value);
707 w->finaliser = evacuate(w->finaliser);
708 /* remove this weak ptr from the old_weak_ptr list */
710 /* and put it on the new weak ptr list */
712 w->link = weak_ptr_list;
715 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
725 /* If we didn't make any changes, then we can go round and kill all
726 * the dead weak pointers. The old_weak_ptr list is used as a list
727 * of pending finalisers later on.
729 if (flag == rtsFalse) {
730 for (w = old_weak_ptr_list; w; w = w->link) {
731 w->value = evacuate(w->value);
732 w->finaliser = evacuate(w->finaliser);
740 /* -----------------------------------------------------------------------------
741 isAlive determines whether the given closure is still alive (after
742 a garbage collection) or not. It returns the new address of the
743 closure if it is alive, or NULL otherwise.
744 -------------------------------------------------------------------------- */
747 isAlive(StgClosure *p)
755 /* ToDo: for static closures, check the static link field.
756 * Problem here is that we sometimes don't set the link field, eg.
757 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
760 /* ignore closures in generations that we're not collecting. */
761 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
765 switch (info->type) {
770 case IND_OLDGEN: /* rely on compatible layout with StgInd */
771 case IND_OLDGEN_PERM:
772 /* follow indirections */
773 p = ((StgInd *)p)->indirectee;
778 return ((StgEvacuated *)p)->evacuee;
788 MarkRoot(StgClosure *root)
790 return evacuate(root);
793 static void addBlock(step *step)
795 bdescr *bd = allocBlock();
799 if (step->gen->no <= N) {
805 step->hp_bd->free = step->hp;
806 step->hp_bd->link = bd;
807 step->hp = bd->start;
808 step->hpLim = step->hp + BLOCK_SIZE_W;
813 static __inline__ StgClosure *
814 copy(StgClosure *src, nat size, step *step)
818 /* Find out where we're going, using the handy "to" pointer in
819 * the step of the source object. If it turns out we need to
820 * evacuate to an older generation, adjust it here (see comment
823 if (step->gen->no < evac_gen) {
824 step = &generations[evac_gen].steps[0];
827 /* chain a new block onto the to-space for the destination step if
830 if (step->hp + size >= step->hpLim) {
834 for(to = step->hp, from = (P_)src; size>0; --size) {
840 return (StgClosure *)dest;
843 /* Special version of copy() for when we only want to copy the info
844 * pointer of an object, but reserve some padding after it. This is
845 * used to optimise evacuation of BLACKHOLEs.
848 static __inline__ StgClosure *
849 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
853 if (step->gen->no < evac_gen) {
854 step = &generations[evac_gen].steps[0];
857 if (step->hp + size_to_reserve >= step->hpLim) {
861 for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
866 step->hp += size_to_reserve;
867 return (StgClosure *)dest;
870 static __inline__ void
871 upd_evacuee(StgClosure *p, StgClosure *dest)
873 StgEvacuated *q = (StgEvacuated *)p;
875 SET_INFO(q,&EVACUATED_info);
879 /* -----------------------------------------------------------------------------
880 Evacuate a mutable object
882 If we evacuate a mutable object to an old generation, cons the
883 object onto the older generation's mutable list.
884 -------------------------------------------------------------------------- */
887 evacuate_mutable(StgMutClosure *c)
892 if (bd->gen->no > 0) {
893 c->mut_link = bd->gen->mut_list;
894 bd->gen->mut_list = c;
898 /* -----------------------------------------------------------------------------
899 Evacuate a large object
901 This just consists of removing the object from the (doubly-linked)
902 large_alloc_list, and linking it on to the (singly-linked)
903 new_large_objects list, from where it will be scavenged later.
905 Convention: bd->evacuated is /= 0 for a large object that has been
906 evacuated, or 0 otherwise.
907 -------------------------------------------------------------------------- */
910 evacuate_large(StgPtr p, rtsBool mutable)
912 bdescr *bd = Bdescr(p);
915 /* should point to the beginning of the block */
916 ASSERT(((W_)p & BLOCK_MASK) == 0);
918 /* already evacuated? */
920 /* Don't forget to set the failed_to_evac flag if we didn't get
921 * the desired destination (see comments in evacuate()).
923 if (bd->gen->no < evac_gen) {
924 failed_to_evac = rtsTrue;
925 TICK_GC_FAILED_PROMOTION();
931 /* remove from large_object list */
933 bd->back->link = bd->link;
934 } else { /* first object in the list */
935 step->large_objects = bd->link;
938 bd->link->back = bd->back;
941 /* link it on to the evacuated large object list of the destination step
944 if (step->gen->no < evac_gen) {
945 step = &generations[evac_gen].steps[0];
950 bd->link = step->new_large_objects;
951 step->new_large_objects = bd;
955 evacuate_mutable((StgMutClosure *)p);
959 /* -----------------------------------------------------------------------------
960 Adding a MUT_CONS to an older generation.
962 This is necessary from time to time when we end up with an
963 old-to-new generation pointer in a non-mutable object. We defer
964 the promotion until the next GC.
965 -------------------------------------------------------------------------- */
968 mkMutCons(StgClosure *ptr, generation *gen)
973 step = &gen->steps[0];
975 /* chain a new block onto the to-space for the destination step if
978 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
982 q = (StgMutVar *)step->hp;
983 step->hp += sizeofW(StgMutVar);
985 SET_HDR(q,&MUT_CONS_info,CCS_GC);
987 evacuate_mutable((StgMutClosure *)q);
989 return (StgClosure *)q;
992 /* -----------------------------------------------------------------------------
995 This is called (eventually) for every live object in the system.
997 The caller to evacuate specifies a desired generation in the
998 evac_gen global variable. The following conditions apply to
999 evacuating an object which resides in generation M when we're
1000 collecting up to generation N
1004 else evac to step->to
1006 if M < evac_gen evac to evac_gen, step 0
1008 if the object is already evacuated, then we check which generation
1011 if M >= evac_gen do nothing
1012 if M < evac_gen set failed_to_evac flag to indicate that we
1013 didn't manage to evacuate this object into evac_gen.
1015 -------------------------------------------------------------------------- */
1019 evacuate(StgClosure *q)
1024 const StgInfoTable *info;
1027 if (!LOOKS_LIKE_STATIC(q)) {
1029 if (bd->gen->no > N) {
1030 /* Can't evacuate this object, because it's in a generation
1031 * older than the ones we're collecting. Let's hope that it's
1032 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1034 if (bd->gen->no < evac_gen) {
1036 failed_to_evac = rtsTrue;
1037 TICK_GC_FAILED_PROMOTION();
1041 step = bd->step->to;
1044 /* make sure the info pointer is into text space */
1045 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1046 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1049 switch (info -> type) {
1052 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);
1058 to = copy(q,sizeW_fromITBL(info),step);
1060 evacuate_mutable((StgMutClosure *)to);
1064 stable_ptr_table[((StgStableName *)q)->sn].keep = rtsTrue;
1065 to = copy(q,sizeofW(StgStableName),step);
1073 to = copy(q,sizeofW(StgHeader)+1,step);
1077 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1088 to = copy(q,sizeofW(StgHeader)+2,step);
1096 case IND_OLDGEN_PERM:
1101 to = copy(q,sizeW_fromITBL(info),step);
1107 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1112 to = copy(q,BLACKHOLE_sizeW(),step);
1114 evacuate_mutable((StgMutClosure *)to);
1117 case THUNK_SELECTOR:
1119 const StgInfoTable* selectee_info;
1120 StgClosure* selectee = ((StgSelector*)q)->selectee;
1123 selectee_info = get_itbl(selectee);
1124 switch (selectee_info->type) {
1133 StgNat32 offset = info->layout.selector_offset;
1135 /* check that the size is in range */
1137 (StgNat32)(selectee_info->layout.payload.ptrs +
1138 selectee_info->layout.payload.nptrs));
1140 /* perform the selection! */
1141 q = selectee->payload[offset];
1143 /* if we're already in to-space, there's no need to continue
1144 * with the evacuation, just update the source address with
1145 * a pointer to the (evacuated) constructor field.
1147 if (IS_USER_PTR(q)) {
1148 bdescr *bd = Bdescr((P_)q);
1149 if (bd->evacuated) {
1150 if (bd->gen->no < evac_gen) {
1151 failed_to_evac = rtsTrue;
1152 TICK_GC_FAILED_PROMOTION();
1158 /* otherwise, carry on and evacuate this constructor field,
1159 * (but not the constructor itself)
1168 case IND_OLDGEN_PERM:
1169 selectee = stgCast(StgInd *,selectee)->indirectee;
1173 selectee = stgCast(StgCAF *,selectee)->value;
1177 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1187 case THUNK_SELECTOR:
1188 /* aargh - do recursively???? */
1193 /* not evaluated yet */
1197 barf("evacuate: THUNK_SELECTOR: strange selectee");
1200 to = copy(q,THUNK_SELECTOR_sizeW(),step);
1206 /* follow chains of indirections, don't evacuate them */
1207 q = ((StgInd*)q)->indirectee;
1210 /* ToDo: optimise STATIC_LINK for known cases.
1211 - FUN_STATIC : payload[0]
1212 - THUNK_STATIC : payload[1]
1213 - IND_STATIC : payload[1]
1217 if (info->srt_len == 0) { /* small optimisation */
1223 /* don't want to evacuate these, but we do want to follow pointers
1224 * from SRTs - see scavenge_static.
1227 /* put the object on the static list, if necessary.
1229 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1230 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1231 static_objects = (StgClosure *)q;
1235 case CONSTR_INTLIKE:
1236 case CONSTR_CHARLIKE:
1237 case CONSTR_NOCAF_STATIC:
1238 /* no need to put these on the static linked list, they don't need
1253 /* shouldn't see these */
1254 barf("evacuate: stack frame\n");
1258 /* these are special - the payload is a copy of a chunk of stack,
1260 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1265 /* Already evacuated, just return the forwarding address.
1266 * HOWEVER: if the requested destination generation (evac_gen) is
1267 * older than the actual generation (because the object was
1268 * already evacuated to a younger generation) then we have to
1269 * set the failed_to_evac flag to indicate that we couldn't
1270 * manage to promote the object to the desired generation.
1272 if (evac_gen > 0) { /* optimisation */
1273 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1274 if (Bdescr((P_)p)->gen->no < evac_gen) {
1275 /* fprintf(stderr,"evac failed!\n");*/
1276 failed_to_evac = rtsTrue;
1277 TICK_GC_FAILED_PROMOTION();
1280 return ((StgEvacuated*)q)->evacuee;
1285 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1287 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1288 evacuate_large((P_)q, rtsFalse);
1291 /* just copy the block */
1292 to = copy(q,size,step);
1299 case MUT_ARR_PTRS_FROZEN:
1301 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1303 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1304 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1307 /* just copy the block */
1308 to = copy(q,size,step);
1310 if (info->type == MUT_ARR_PTRS) {
1311 evacuate_mutable((StgMutClosure *)to);
1319 StgTSO *tso = stgCast(StgTSO *,q);
1320 nat size = tso_sizeW(tso);
1323 /* Large TSOs don't get moved, so no relocation is required.
1325 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1326 evacuate_large((P_)q, rtsTrue);
1329 /* To evacuate a small TSO, we need to relocate the update frame
1333 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1335 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1337 /* relocate the stack pointers... */
1338 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1339 new_tso->sp = (StgPtr)new_tso->sp + diff;
1340 new_tso->splim = (StgPtr)new_tso->splim + diff;
1342 relocate_TSO(tso, new_tso);
1343 upd_evacuee(q,(StgClosure *)new_tso);
1345 evacuate_mutable((StgMutClosure *)new_tso);
1346 return (StgClosure *)new_tso;
1352 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1356 barf("evacuate: strange closure type");
1362 /* -----------------------------------------------------------------------------
1363 relocate_TSO is called just after a TSO has been copied from src to
1364 dest. It adjusts the update frame list for the new location.
1365 -------------------------------------------------------------------------- */
/* NOTE(review): this chunk is an elided listing -- source lines are missing
 * between the numbered lines below, so statements shown adjacent here were
 * not necessarily adjacent in the original file.
 */
1368 relocate_TSO(StgTSO *src, StgTSO *dest)
/* diff = how far (in words) the stack moved; each frame's link pointer on
 * the copied stack is slid by this amount so the frame chain stays intact
 * at the new location.
 */
1375 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
/* walk every activation record ('su') up to the top of the new stack */
1379 while ((P_)su < dest->stack + dest->stack_size) {
1380 switch (get_itbl(su)->type) {
1382 /* GCC actually manages to common up these three cases! */
1385 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1390 cf = (StgCatchFrame *)su;
1391 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1396 sf = (StgSeqFrame *)su;
1397 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
/* any other frame type on the stack is a bug */
1406 barf("relocate_TSO");
/* Evacuate every closure referenced from an info table's SRT (static
 * reference table). NOTE(review): return type and loop body lines are
 * among the elided lines of this listing.
 */
1415 scavenge_srt(const StgInfoTable *info)
1417 StgClosure **srt, **srt_end;
1419 /* evacuate the SRT. If srt_len is zero, then there isn't an
1420 * srt field in the info table. That's ok, because we'll
1421 * never dereference it.
1423 srt = stgCast(StgClosure **,info->srt);
1424 srt_end = srt + info->srt_len;
1425 for (; srt < srt_end; srt++) {
/* NOTE(review): elided listing -- source lines are missing between the
 * numbered lines below; several case labels, braces and declarations for
 * the switch arms are among the missing lines.
 */
1430 /* -----------------------------------------------------------------------------
1431 Scavenge a given step until there are no more objects in this step
1434 evac_gen is set by the caller to be either zero (for a step in a
1435 generation < N) or G where G is the generation of the step being
1438 We sometimes temporarily change evac_gen back to zero if we're
1439 scavenging a mutable object where early promotion isn't such a good
1441 -------------------------------------------------------------------------- */
1445 scavenge(step *step)
1448 const StgInfoTable *info;
1450 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
/* failed_to_evac is a global flag set by evacuate() when an object could
 * not be promoted to evac_gen; it is cleared here and re-checked after
 * scavenging each object (see the mkMutCons() call at the bottom). */
1455 failed_to_evac = rtsFalse;
1457 /* scavenge phase - standard breadth-first scavenging of the
1461 while (bd != step->hp_bd || p < step->hp) {
1463 /* If we're at the end of this block, move on to the next block */
1464 if (bd != step->hp_bd && p == bd->free) {
1470 q = p; /* save ptr to object */
1472 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1473 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
/* dispatch on closure type: each arm evacuates the object's pointer
 * fields and advances p past the object */
1475 info = get_itbl((StgClosure *)p);
1476 switch (info -> type) {
1480 StgBCO* bco = stgCast(StgBCO*,p);
1482 for (i = 0; i < bco->n_ptrs; i++) {
1483 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1485 p += bco_sizeW(bco);
1490 /* treat MVars specially, because we don't want to evacuate the
1491 * mut_link field in the middle of the closure.
1494 StgMVar *mvar = ((StgMVar *)p);
1496 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1497 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1498 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1499 p += sizeofW(StgMVar);
1500 evac_gen = saved_evac_gen;
1508 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1509 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1510 p += sizeofW(StgHeader) + 2;
1515 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1516 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1522 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1523 p += sizeofW(StgHeader) + 1;
1528 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1534 p += sizeofW(StgHeader) + 1;
1541 p += sizeofW(StgHeader) + 2;
1548 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1549 p += sizeofW(StgHeader) + 2;
1562 case IND_OLDGEN_PERM:
/* generic layout case: evacuate the pointer portion of the payload,
 * then skip the non-pointer words */
1568 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1569 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1570 (StgClosure *)*p = evacuate((StgClosure *)*p);
1572 p += info->layout.payload.nptrs;
1577 /* ignore MUT_CONSs */
1578 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1580 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1581 evac_gen = saved_evac_gen;
1583 p += sizeofW(StgMutVar);
1588 p += BLACKHOLE_sizeW();
1593 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1594 (StgClosure *)bh->blocking_queue =
1595 evacuate((StgClosure *)bh->blocking_queue);
1596 if (failed_to_evac) {
1597 failed_to_evac = rtsFalse;
1598 evacuate_mutable((StgMutClosure *)bh);
1600 p += BLACKHOLE_sizeW();
1604 case THUNK_SELECTOR:
1606 StgSelector *s = (StgSelector *)p;
1607 s->selectee = evacuate(s->selectee);
1608 p += THUNK_SELECTOR_sizeW();
1614 barf("scavenge:IND???\n");
1616 case CONSTR_INTLIKE:
1617 case CONSTR_CHARLIKE:
1619 case CONSTR_NOCAF_STATIC:
1623 /* Shouldn't see a static object here. */
1624 barf("scavenge: STATIC object\n");
1636 /* Shouldn't see stack frames here. */
1637 barf("scavenge: stack frame\n");
1639 case AP_UPD: /* same as PAPs */
1641 /* Treat a PAP just like a section of stack, not forgetting to
1642 * evacuate the function pointer too...
1645 StgPAP* pap = stgCast(StgPAP*,p);
1647 pap->fun = evacuate(pap->fun);
1648 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1649 p += pap_sizeW(pap);
1655 /* nothing to follow */
1656 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1660 /* follow everything */
1664 evac_gen = 0; /* repeatedly mutable */
1665 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1666 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1667 (StgClosure *)*p = evacuate((StgClosure *)*p);
1669 evac_gen = saved_evac_gen;
1673 case MUT_ARR_PTRS_FROZEN:
1674 /* follow everything */
1676 StgPtr start = p, next;
1678 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1679 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1680 (StgClosure *)*p = evacuate((StgClosure *)*p);
1682 if (failed_to_evac) {
1683 /* we can do this easier... */
1684 evacuate_mutable((StgMutClosure *)start);
1685 failed_to_evac = rtsFalse;
1696 /* chase the link field for any TSOs on the same queue */
1697 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1698 /* scavenge this thread's stack */
1699 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1700 evac_gen = saved_evac_gen;
1701 p += tso_sizeW(tso);
1708 barf("scavenge: unimplemented/strange closure type\n");
1714 /* If we didn't manage to promote all the objects pointed to by
1715 * the current object, then we have to designate this object as
1716 * mutable (because it contains old-to-new generation pointers).
1718 if (failed_to_evac) {
1719 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1720 failed_to_evac = rtsFalse;
/* NOTE(review): elided listing -- source lines (case labels, braces,
 * declarations, the return statement) are missing between the lines below.
 */
1728 /* -----------------------------------------------------------------------------
1729 Scavenge one object.
1731 This is used for objects that are temporarily marked as mutable
1732 because they contain old-to-new generation pointers. Only certain
1733 objects can have this property.
1734 -------------------------------------------------------------------------- */
1736 scavenge_one(StgPtr p)
1741 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1742 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
/* same per-closure-type dispatch as scavenge(), but for a single object;
 * the caller learns via the result whether any pointed-to object failed
 * to be promoted (see no_luck below) */
1744 info = get_itbl((StgClosure *)p);
1746 switch (info -> type) {
1749 case FUN_1_0: /* hardly worth specialising these guys */
1769 case IND_OLDGEN_PERM:
1775 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1776 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1777 (StgClosure *)*p = evacuate((StgClosure *)*p);
1786 case THUNK_SELECTOR:
1788 StgSelector *s = (StgSelector *)p;
1789 s->selectee = evacuate(s->selectee);
1793 case AP_UPD: /* same as PAPs */
1795 /* Treat a PAP just like a section of stack, not forgetting to
1796 * evacuate the function pointer too...
1799 StgPAP* pap = stgCast(StgPAP*,p);
1801 pap->fun = evacuate(pap->fun);
1802 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1807 /* This might happen if for instance a MUT_CONS was pointing to a
1808 * THUNK which has since been updated. The IND_OLDGEN will
1809 * be on the mutable list anyway, so we don't need to do anything
1815 barf("scavenge_one: strange object");
/* snapshot and reset the global promotion-failure flag for the caller */
1818 no_luck = failed_to_evac;
1819 failed_to_evac = rtsFalse;
/* NOTE(review): elided listing -- source lines are missing between the
 * numbered lines below (case labels, braces, and several statements of
 * the list-surgery logic).
 */
1824 /* -----------------------------------------------------------------------------
1825 Scavenging mutable lists.
1827 We treat the mutable list of each generation > N (i.e. all the
1828 generations older than the one being collected) as roots. We also
1829 remove non-mutable objects from the mutable list at this point.
1830 -------------------------------------------------------------------------- */
1832 static StgMutClosure *
1833 scavenge_mutable_list(StgMutClosure *p, nat gen)
1836 StgMutClosure *start;
1837 StgMutClosure **prev;
/* 'prev' points at the link field of the last node we decided to keep:
 * 'prev = &p->mut_link' keeps the current node on the list, while
 * '*prev = p->mut_link' unlinks it. failed_to_evac is the global
 * promotion-failure flag set by evacuate(). */
1844 failed_to_evac = rtsFalse;
1846 for (; p != END_MUT_LIST; p = *prev) {
1848 /* make sure the info pointer is into text space */
1849 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1850 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1853 switch(info->type) {
1855 case MUT_ARR_PTRS_FROZEN:
1856 /* remove this guy from the mutable list, but follow the ptrs
1857 * anyway (and make sure they get promoted to this gen).
1862 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1864 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1865 (StgClosure *)*q = evacuate((StgClosure *)*q);
1869 if (failed_to_evac) {
1870 failed_to_evac = rtsFalse;
1871 prev = &p->mut_link;
1873 *prev = p->mut_link;
1879 /* follow everything */
1880 prev = &p->mut_link;
1884 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1885 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1886 (StgClosure *)*q = evacuate((StgClosure *)*q);
1892 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1893 * it from the mutable list if possible by promoting whatever it
1896 if (p->header.info == &MUT_CONS_info) {
1898 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1899 /* didn't manage to promote everything, so leave the
1900 * MUT_CONS on the list.
1902 prev = &p->mut_link;
1904 *prev = p->mut_link;
1908 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1909 prev = &p->mut_link;
1915 StgMVar *mvar = (StgMVar *)p;
1916 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1917 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1918 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1919 prev = &p->mut_link;
1924 /* follow ptrs and remove this from the mutable list */
1926 StgTSO *tso = (StgTSO *)p;
1928 /* Don't bother scavenging if this thread is dead
1930 if (!(tso->whatNext == ThreadComplete ||
1931 tso->whatNext == ThreadKilled)) {
1932 /* Don't need to chase the link field for any TSOs on the
1933 * same queue. Just scavenge this thread's stack
1935 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1938 /* Don't take this TSO off the mutable list - it might still
1939 * point to some younger objects (because we set evac_gen to 0
1942 prev = &tso->mut_link;
1947 case IND_OLDGEN_PERM:
1949 /* Try to pull the indirectee into this generation, so we can
1950 * remove the indirection from the mutable list.
1953 ((StgIndOldGen *)p)->indirectee =
1954 evacuate(((StgIndOldGen *)p)->indirectee);
1957 if (failed_to_evac) {
1958 failed_to_evac = rtsFalse;
1959 prev = &p->mut_link;
1961 *prev = p->mut_link;
1962 /* the mut_link field of an IND_STATIC is overloaded as the
1963 * static link field too (it just so happens that we don't need
1964 * both at the same time), so we need to NULL it out when
1965 * removing this object from the mutable list because the static
1966 * link fields are all assumed to be NULL before doing a major
1975 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1976 (StgClosure *)bh->blocking_queue =
1977 evacuate((StgClosure *)bh->blocking_queue);
1978 prev = &p->mut_link;
1983 /* shouldn't have anything else on the mutables list */
1984 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the global static_objects list, moving each object onto
 * scavenged_static_objects as it is processed. NOTE(review): elided
 * listing -- lines are missing between the numbered lines below.
 */
1991 scavenge_static(void)
1993 StgClosure* p = static_objects;
1994 const StgInfoTable *info;
1996 /* Always evacuate straight to the oldest generation for static
1998 evac_gen = oldest_gen->no;
2000 /* keep going until we've scavenged all the objects on the linked
2002 while (p != END_OF_STATIC_LIST) {
2006 /* make sure the info pointer is into text space */
2007 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2008 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2010 /* Take this object *off* the static_objects list,
2011 * and put it on the scavenged_static_objects list.
2013 static_objects = STATIC_LINK(info,p);
2014 STATIC_LINK(info,p) = scavenged_static_objects;
2015 scavenged_static_objects = p;
/* now scavenge the object itself, by closure type */
2017 switch (info -> type) {
2021 StgInd *ind = (StgInd *)p;
2022 ind->indirectee = evacuate(ind->indirectee);
2024 /* might fail to evacuate it, in which case we have to pop it
2025 * back on the mutable list (and take it off the
2026 * scavenged_static list because the static link and mut link
2027 * pointers are one and the same).
2029 if (failed_to_evac) {
2030 failed_to_evac = rtsFalse;
2031 scavenged_static_objects = STATIC_LINK(info,p);
2032 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
2033 oldest_gen->mut_list = (StgMutClosure *)ind;
2047 next = (P_)p->payload + info->layout.payload.ptrs;
2048 /* evacuate the pointers */
2049 for (q = (P_)p->payload; q < next; q++) {
2050 (StgClosure *)*q = evacuate((StgClosure *)*q);
2056 barf("scavenge_static");
2059 ASSERT(failed_to_evac == rtsFalse);
2061 /* get the next static object from the list. Remember, there might
2062 * be more stuff on this list now that we've done some evacuating!
2063 * (static_objects is a global)
/* NOTE(review): elided listing -- source lines are missing between the
 * numbered lines below (case labels, braces, declarations).
 */
2069 /* -----------------------------------------------------------------------------
2070 scavenge_stack walks over a section of stack and evacuates all the
2071 objects pointed to by it. We can use the same code for walking
2072 PAPs, since these are just sections of copied stack.
2073 -------------------------------------------------------------------------- */
2076 scavenge_stack(StgPtr p, StgPtr stack_end)
2079 const StgInfoTable* info;
2083 * Each time around this loop, we are looking at a chunk of stack
2084 * that starts with either a pending argument section or an
2085 * activation record.
2088 while (p < stack_end) {
/* q = first word of the chunk: an argument tag, a closure pointer,
 * or the info pointer of an activation record (decided by the
 * IS_ARG_TAG / LOOKS_LIKE_GHC_INFO tests below) */
2089 q = *stgCast(StgPtr*,p);
2091 /* If we've got a tag, skip over that many words on the stack */
2092 if (IS_ARG_TAG(stgCast(StgWord,q))) {
2097 /* Is q a pointer to a closure?
2099 if (! LOOKS_LIKE_GHC_INFO(q)) {
2102 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
2103 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
2105 /* otherwise, must be a pointer into the allocation space.
2109 (StgClosure *)*p = evacuate((StgClosure *)q);
2115 * Otherwise, q must be the info pointer of an activation
2116 * record. All activation records have 'bitmap' style layout
2119 info = get_itbl(stgCast(StgClosure*,p));
2121 switch (info->type) {
2123 /* Dynamic bitmap: the mask is stored on the stack */
2125 bitmap = stgCast(StgRetDyn*,p)->liveness;
2126 p = &payloadWord(stgCast(StgRetDyn*,p),0);
2129 /* probably a slow-entry point return address: */
2135 /* Specialised code for update frames, since they're so common.
2136 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2137 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2141 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2143 StgClosureType type = get_itbl(frame->updatee)->type;
/* advance past the frame first; the updatee is handled inline below */
2145 p += sizeofW(StgUpdateFrame);
2146 if (type == EVACUATED) {
2147 frame->updatee = evacuate(frame->updatee);
2150 bdescr *bd = Bdescr((P_)frame->updatee);
2152 if (bd->gen->no > N) {
2153 if (bd->gen->no < evac_gen) {
2154 failed_to_evac = rtsTrue;
2158 step = bd->step->to;
2162 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2163 sizeofW(StgHeader), step);
2164 upd_evacuee(frame->updatee,to);
2165 frame->updatee = to;
2168 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2169 upd_evacuee(frame->updatee,to);
2170 frame->updatee = to;
2171 evacuate_mutable((StgMutClosure *)to);
2174 barf("scavenge_stack: UPDATE_FRAME updatee");
2179 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2186 bitmap = info->layout.bitmap;
/* a 0 bit in the liveness bitmap marks a live pointer slot */
2189 while (bitmap != 0) {
2190 if ((bitmap & 1) == 0) {
2191 (StgClosure *)*p = evacuate((StgClosure *)*p);
2194 bitmap = bitmap >> 1;
2201 /* large bitmap (> 32 entries) */
2206 StgLargeBitmap *large_bitmap;
2209 large_bitmap = info->layout.large_bitmap;
2212 for (i=0; i<large_bitmap->size; i++) {
2213 bitmap = large_bitmap->bitmap[i];
2214 q = p + sizeof(W_) * 8;
2215 while (bitmap != 0) {
2216 if ((bitmap & 1) == 0) {
2217 (StgClosure *)*p = evacuate((StgClosure *)*p);
2220 bitmap = bitmap >> 1;
2222 if (i+1 < large_bitmap->size) {
2224 (StgClosure *)*p = evacuate((StgClosure *)*p);
2230 /* and don't forget to follow the SRT */
2235 barf("scavenge_stack: weird activation record found on stack.\n");
/* NOTE(review): elided listing -- source lines are missing between the
 * numbered lines below.
 */
2240 /*-----------------------------------------------------------------------------
2241 scavenge the large object list.
2243 evac_gen set by caller; similar games played with evac_gen as with
2244 scavenge() - see comment at the top of scavenge(). Most large
2245 objects are (repeatedly) mutable, so most of the time evac_gen will
2247 --------------------------------------------------------------------------- */
2250 scavenge_large(step *step)
2254 const StgInfoTable* info;
2255 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2257 evac_gen = 0; /* most objects are mutable */
/* each iteration pops the head of new_large_objects; evacuating inside
 * the loop may push more entries onto that list (see comment below) */
2258 bd = step->new_large_objects;
2260 for (; bd != NULL; bd = step->new_large_objects) {
2262 /* take this object *off* the large objects list and put it on
2263 * the scavenged large objects list. This is so that we can
2264 * treat new_large_objects as a stack and push new objects on
2265 * the front when evacuating.
2267 step->new_large_objects = bd->link;
2268 dbl_link_onto(bd, &step->scavenged_large_objects);
2271 info = get_itbl(stgCast(StgClosure*,p));
2273 switch (info->type) {
2275 /* only certain objects can be "large"... */
2279 /* nothing to follow */
2283 /* follow everything */
2287 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2288 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2289 (StgClosure *)*p = evacuate((StgClosure *)*p);
2294 case MUT_ARR_PTRS_FROZEN:
2295 /* follow everything */
2297 StgPtr start = p, next;
2299 evac_gen = saved_evac_gen; /* not really mutable */
2300 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2301 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2302 (StgClosure *)*p = evacuate((StgClosure *)*p);
2305 if (failed_to_evac) {
2306 evacuate_mutable((StgMutClosure *)start);
2313 StgBCO* bco = stgCast(StgBCO*,p);
2315 evac_gen = saved_evac_gen;
2316 for (i = 0; i < bco->n_ptrs; i++) {
2317 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2328 /* chase the link field for any TSOs on the same queue */
2329 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2330 /* scavenge this thread's stack */
2331 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2336 barf("scavenge_large: unknown/strange object");
/* Unthread a static-object list: walk from first_static to
 * END_OF_STATIC_LIST, resetting every static link field to NULL.
 * NOTE(review): return type and the lines that initialise 'info' are
 * among the elided lines of this listing.
 */
2342 zeroStaticObjectList(StgClosure* first_static)
2346 const StgInfoTable *info;
2348 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
/* read the link before clearing the field it lives in */
2350 link = STATIC_LINK(info, p);
2351 STATIC_LINK(info,p) = NULL;
2355 /* This function is only needed because we share the mutable link
2356 * field with the static link field in an IND_STATIC, so we have to
2357 * zero the mut_link field before doing a major GC, which needs the
2358 * static link field.
2360 * It doesn't do any harm to zero all the mutable link fields on the
2364 zeroMutableList(StgMutClosure *first)
2366 StgMutClosure *next, *c;
/* NOTE(review): the loop body (which presumably saves c->mut_link into
 * 'next' before clearing it) is among the elided lines -- confirm against
 * the full source. */
2368 for (c = first; c != END_MUT_LIST; c = next) {
2374 /* -----------------------------------------------------------------------------
2376 -------------------------------------------------------------------------- */
/* Revert every CAF on the enteredCAFs list back to the CAF_UNENTERED
 * state, poisoning the value and link fields with 0xdeadbeef so any
 * stale use is caught. Empties the enteredCAFs list as it goes.
 */
2378 void RevertCAFs(void)
2380 while (enteredCAFs != END_CAF_LIST) {
2381 StgCAF* caf = enteredCAFs;
2383 enteredCAFs = caf->link;
2384 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2385 SET_INFO(caf,&CAF_UNENTERED_info);
2386 caf->value = stgCast(StgClosure*,0xdeadbeef);
2387 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* Rebuild the enteredCAFs list after GC, keeping only live CAFs:
 * EVACUATED entries are re-linked via their new (evacuated) address;
 * dead entries are reverted to CAF_UNENTERED and poisoned with
 * 0xdeadbeef. NOTE(review): elided listing -- several case labels and
 * braces are among the missing lines below.
 */
2391 void revertDeadCAFs(void)
2393 StgCAF* caf = enteredCAFs;
2394 enteredCAFs = END_CAF_LIST;
2395 while (caf != END_CAF_LIST) {
2396 StgCAF* next = caf->link;
2398 switch(GET_INFO(caf)->type) {
2401 /* This object has been evacuated, it must be live. */
2402 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2403 new->link = enteredCAFs;
2409 SET_INFO(caf,&CAF_UNENTERED_info);
2410 caf->value = stgCast(StgClosure*,0xdeadbeef);
2411 caf->link = stgCast(StgCAF*,0xdeadbeef);
2415 barf("revertDeadCAFs: enteredCAFs list corrupted");
2421 /* -----------------------------------------------------------------------------
2422 Sanity code for CAF garbage collection.
2424 With DEBUG turned on, we manage a CAF list in addition to the SRT
2425 mechanism. After GC, we run down the CAF list and blackhole any
2426 CAFs which have been garbage collected. This means we get an error
2427 whenever the program tries to enter a garbage collected CAF.
2429 Any garbage collected CAFs are taken off the CAF list at the same
2431 -------------------------------------------------------------------------- */
/* NOTE(review): the header of this DEBUG-only routine is among the elided
 * lines, so its name and signature are not visible here. From the body:
 * it walks a list of IND_STATICs; an entry whose static link is NULL was
 * not reached by this GC, so it is blackholed and unlinked via
 * STATIC_LINK2; live entries are kept. Confirm against the full source.
 */
2439 const StgInfoTable *info;
2450 ASSERT(info->type == IND_STATIC);
2452 if (STATIC_LINK(info,p) == NULL) {
2453 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
2455 SET_INFO(p,&BLACKHOLE_info);
2456 p = STATIC_LINK2(info,p);
2460 pp = &STATIC_LINK2(info,p);
2467 /* fprintf(stderr, "%d CAFs live\n", i); */
2471 /* -----------------------------------------------------------------------------
2474 Whenever a thread returns to the scheduler after possibly doing
2475 some work, we have to run down the stack and black-hole all the
2476 closures referred to by update frames.
2477 -------------------------------------------------------------------------- */
/* NOTE(review): elided listing -- case labels, braces and the return
 * statement are among the missing lines below.
 */
2480 threadLazyBlackHole(StgTSO *tso)
2482 StgUpdateFrame *update_frame;
2483 StgBlockingQueue *bh;
2486 stack_end = &tso->stack[tso->stack_size];
2487 update_frame = tso->su;
/* walk the update-frame chain from tso->su towards the stack bottom,
 * blackholing each updatee until one is found already blackholed */
2490 switch (get_itbl(update_frame)->type) {
2493 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2497 bh = (StgBlockingQueue *)update_frame->updatee;
2499 /* if the thunk is already blackholed, it means we've also
2500 * already blackholed the rest of the thunks on this stack,
2501 * so we can stop early.
2503 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
2504 * don't interfere with this optimisation.
2506 if (bh->header.info == &BLACKHOLE_info) {
2510 if (bh->header.info != &BLACKHOLE_BQ_info &&
2511 bh->header.info != &CAF_BLACKHOLE_info) {
2512 SET_INFO(bh,&BLACKHOLE_info);
2515 update_frame = update_frame->link;
2519 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2525 barf("threadPaused");
/* NOTE(review): elided listing -- source lines are missing between the
 * numbered lines below (braces, else-branches, declarations).
 */
2530 /* -----------------------------------------------------------------------------
2533 * Code largely pinched from old RTS, then hacked to bits. We also do
2534 * lazy black holing here.
2536 * -------------------------------------------------------------------------- */
2539 threadSqueezeStack(StgTSO *tso)
2541 lnat displacement = 0;
2542 StgUpdateFrame *frame;
2543 StgUpdateFrame *next_frame; /* Temporally next */
2544 StgUpdateFrame *prev_frame; /* Temporally previous */
2546 rtsBool prev_was_update_frame;
2548 bottom = &(tso->stack[tso->stack_size]);
2551 /* There must be at least one frame, namely the STOP_FRAME.
2553 ASSERT((P_)frame < bottom);
2555 /* Walk down the stack, reversing the links between frames so that
2556 * we can walk back up as we squeeze from the bottom. Note that
2557 * next_frame and prev_frame refer to next and previous as they were
2558 * added to the stack, rather than the way we see them in this
2559 * walk. (It makes the next loop less confusing.)
2561 * Stop if we find an update frame pointing to a black hole
2562 * (see comment in threadLazyBlackHole()).
2566 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
/* pointer reversal: remember the frame below, point our link upward */
2567 prev_frame = frame->link;
2568 frame->link = next_frame;
2571 if (get_itbl(frame)->type == UPDATE_FRAME
2572 && frame->updatee->header.info == &BLACKHOLE_info) {
2577 /* Now, we're at the bottom. Frame points to the lowest update
2578 * frame on the stack, and its link actually points to the frame
2579 * above. We have to walk back up the stack, squeezing out empty
2580 * update frames and turning the pointers back around on the way
2583 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2584 * we never want to eliminate it anyway. Just walk one step up
2585 * before starting to squeeze. When you get to the topmost frame,
2586 * remember that there are still some words above it that might have
2593 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2596 * Loop through all of the frames (everything except the very
2597 * bottom). Things are complicated by the fact that we have
2598 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2599 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2601 while (frame != NULL) {
2603 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2604 rtsBool is_update_frame;
2606 next_frame = frame->link;
2607 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2610 * 1. both the previous and current frame are update frames
2611 * 2. the current frame is empty
2613 if (prev_was_update_frame && is_update_frame &&
2614 (P_)prev_frame == frame_bottom + displacement) {
2616 /* Now squeeze out the current frame */
2617 StgClosure *updatee_keep = prev_frame->updatee;
2618 StgClosure *updatee_bypass = frame->updatee;
2621 fprintf(stderr, "squeezing frame at %p\n", frame);
2624 /* Deal with blocking queues. If both updatees have blocked
2625 * threads, then we should merge the queues into the update
2626 * frame that we're keeping.
2628 * Alternatively, we could just wake them up: they'll just go
2629 * straight to sleep on the proper blackhole! This is less code
2630 * and probably less bug prone, although it's probably much
2633 #if 0 /* do it properly... */
2634 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2635 /* Sigh. It has one. Don't lose those threads! */
2636 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2637 /* Urgh. Two queues. Merge them. */
2638 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2640 while (keep_tso->link != END_TSO_QUEUE) {
2641 keep_tso = keep_tso->link;
2643 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2646 /* For simplicity, just swap the BQ for the BH */
2647 P_ temp = updatee_keep;
2649 updatee_keep = updatee_bypass;
2650 updatee_bypass = temp;
2652 /* Record the swap in the kept frame (below) */
2653 prev_frame->updatee = updatee_keep;
/* make the bypassed updatee an indirection to the kept one */
2658 TICK_UPD_SQUEEZED();
2659 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2661 sp = (P_)frame - 1; /* sp = stuff to slide */
2662 displacement += sizeofW(StgUpdateFrame);
2665 /* No squeeze for this frame */
2666 sp = frame_bottom - 1; /* Keep the current frame */
2668 /* Do lazy black-holing.
2670 if (is_update_frame) {
2671 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
2672 if (bh->header.info != &BLACKHOLE_BQ_info &&
2673 bh->header.info != &CAF_BLACKHOLE_info) {
2674 SET_INFO(bh,&BLACKHOLE_info);
2678 /* Fix the link in the current frame (should point to the frame below) */
2679 frame->link = prev_frame;
2680 prev_was_update_frame = is_update_frame;
2683 /* Now slide all words from sp up to the next frame */
2685 if (displacement > 0) {
2686 P_ next_frame_bottom;
2688 if (next_frame != NULL)
2689 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2691 next_frame_bottom = tso->sp - 1;
2694 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
2698 while (sp >= next_frame_bottom) {
2699 sp[displacement] = *sp;
/* cast-as-lvalue (GCC extension): slide the frame pointer itself */
2703 (P_)prev_frame = (P_)frame + displacement;
2707 tso->sp += displacement;
2708 tso->su = prev_frame;
2711 /* -----------------------------------------------------------------------------
2714 * We have to prepare for GC - this means doing lazy black holing
2715 * here. We also take the opportunity to do stack squeezing if it's
2717 * -------------------------------------------------------------------------- */
2720 threadPaused(StgTSO *tso)
2722 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2723 threadSqueezeStack(tso); /* does black holing too */
2725 threadLazyBlackHole(tso);