1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.17 1999/01/20 16:07:40 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
85 static rtsBool failed_to_evac;
87 /* Old to-space (used for two-space collector only)
91 /* -----------------------------------------------------------------------------
92 Static function declarations
93 -------------------------------------------------------------------------- */
95 static StgClosure *evacuate(StgClosure *q);
96 static void zeroStaticObjectList(StgClosure* first_static);
97 static rtsBool traverse_weak_ptr_list(void);
98 static void zeroMutableList(StgMutClosure *first);
99 static void revertDeadCAFs(void);
101 static void scavenge_stack(StgPtr p, StgPtr stack_end);
102 static void scavenge_large(step *step);
103 static void scavenge(step *step);
104 static void scavenge_static(void);
105 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
108 static void gcCAFs(void);
111 /* -----------------------------------------------------------------------------
114 For garbage collecting generation N (and all younger generations):
116 - follow all pointers in the root set. the root set includes all
117 mutable objects in all steps in all generations.
119 - for each pointer, evacuate the object it points to into either
120 + to-space in the next higher step in that generation, if one exists,
121 + if the object's generation == N, then evacuate it to the next
122 generation if one exists, or else to-space in the current
124 + if the object's generation < N, then evacuate it to to-space
125 in the next generation.
127 - repeatedly scavenge to-space from each step in each generation
128 being collected until no more objects can be evacuated.
130 - free from-space in each step, and set from-space = to-space.
132 -------------------------------------------------------------------------- */
134 void GarbageCollect(void (*get_roots)(void))
138 lnat live, allocated, collected = 0;
142 CostCentreStack *prev_CCS;
145 /* tell the stats department that we've started a GC */
148 /* attribute any costs to CCS_GC */
154 /* We might have been called from Haskell land by _ccall_GC, in
155 * which case we need to call threadPaused() because the scheduler
156 * won't have done it.
158 if (CurrentTSO) { threadPaused(CurrentTSO); }
160 /* Approximate how much we allocated: number of blocks in the
161 * nursery + blocks allocated via allocate() - unused nusery blocks.
162 * This leaves a little slop at the end of each block, and doesn't
163 * take into account large objects (ToDo).
165 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
166 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
167 allocated -= BLOCK_SIZE_W;
170 /* Figure out which generation to collect
173 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
174 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
178 major_gc = (N == RtsFlags.GcFlags.generations-1);
180 /* check stack sanity *before* GC (ToDo: check all threads) */
181 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
182 IF_DEBUG(sanity, checkFreeListSanity());
184 /* Initialise the static object lists
186 static_objects = END_OF_STATIC_LIST;
187 scavenged_static_objects = END_OF_STATIC_LIST;
189 /* zero the mutable list for the oldest generation (see comment by
190 * zeroMutableList below).
193 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
196 /* Save the old to-space if we're doing a two-space collection
198 if (RtsFlags.GcFlags.generations == 1) {
199 old_to_space = g0s0->to_space;
200 g0s0->to_space = NULL;
203 /* Initialise to-space in all the generations/steps that we're
206 for (g = 0; g <= N; g++) {
207 generations[g].mut_list = END_MUT_LIST;
209 for (s = 0; s < generations[g].n_steps; s++) {
211 /* generation 0, step 0 doesn't need to-space */
212 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
216 /* Get a free block for to-space. Extra blocks will be chained on
220 step = &generations[g].steps[s];
221 ASSERT(step->gen->no == g);
222 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
223 bd->gen = &generations[g];
226 bd->evacuated = 1; /* it's a to-space block */
227 step->hp = bd->start;
228 step->hpLim = step->hp + BLOCK_SIZE_W;
231 step->to_blocks = 1; /* ???? */
232 step->scan = bd->start;
234 step->new_large_objects = NULL;
235 step->scavenged_large_objects = NULL;
236 /* mark the large objects as not evacuated yet */
237 for (bd = step->large_objects; bd; bd = bd->link) {
243 /* make sure the older generations have at least one block to
244 * allocate into (this makes things easier for copy(), see below.
246 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
247 for (s = 0; s < generations[g].n_steps; s++) {
248 step = &generations[g].steps[s];
249 if (step->hp_bd == NULL) {
251 bd->gen = &generations[g];
254 bd->evacuated = 0; /* *not* a to-space block */
255 step->hp = bd->start;
256 step->hpLim = step->hp + BLOCK_SIZE_W;
261 /* Set the scan pointer for older generations: remember we
262 * still have to scavenge objects that have been promoted. */
263 step->scan = step->hp;
264 step->scan_bd = step->hp_bd;
265 step->to_space = NULL;
267 step->new_large_objects = NULL;
268 step->scavenged_large_objects = NULL;
272 /* -----------------------------------------------------------------------
273 * follow all the roots that we know about:
274 * - mutable lists from each generation > N
275 * we want to *scavenge* these roots, not evacuate them: they're not
276 * going to move in this GC.
277 * Also: do them in reverse generation order. This is because we
278 * often want to promote objects that are pointed to by older
279 * generations early, so we don't have to repeatedly copy them.
280 * Doing the generations in reverse order ensures that we don't end
281 * up in the situation where we want to evac an object to gen 3 and
282 * it has already been evaced to gen 2.
285 StgMutClosure *tmp, **pp;
286 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
287 generations[g].saved_mut_list = generations[g].mut_list;
288 generations[g].mut_list = END_MUT_LIST;
291 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
292 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
293 pp = &generations[g].mut_list;
294 while (*pp != END_MUT_LIST) {
295 pp = &(*pp)->mut_link;
301 /* follow all the roots that the application knows about.
306 /* And don't forget to mark the TSO if we got here direct from
309 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
312 /* Mark the weak pointer list, and prepare to detect dead weak
316 old_weak_ptr_list = weak_ptr_list;
317 weak_ptr_list = NULL;
318 weak_done = rtsFalse;
322 /* ToDo: To fix the caf leak, we need to make the commented out
323 * parts of this code do something sensible - as described in
326 extern void markHugsObjects(void);
328 /* ToDo: This (undefined) function should contain the scavenge
329 * loop immediately below this block of code - but I'm not sure
330 * enough of the details to do this myself.
332 scavengeEverything();
333 /* revert dead CAFs and update enteredCAFs list */
338 /* This will keep the CAFs and the attached BCOs alive
339 * but the values will have been reverted
341 scavengeEverything();
346 /* -------------------------------------------------------------------------
347 * Repeatedly scavenge all the areas we know about until there's no
348 * more scavenging to be done.
355 /* scavenge static objects */
356 if (major_gc && static_objects != END_OF_STATIC_LIST) {
360 /* When scavenging the older generations: Objects may have been
361 * evacuated from generations <= N into older generations, and we
362 * need to scavenge these objects. We're going to try to ensure that
363 * any evacuations that occur move the objects into at least the
364 * same generation as the object being scavenged, otherwise we
365 * have to create new entries on the mutable list for the older
369 /* scavenge each step in generations 0..maxgen */
372 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
373 for (s = 0; s < generations[gen].n_steps; s++) {
374 step = &generations[gen].steps[s];
376 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
380 if (step->new_large_objects != NULL) {
381 scavenge_large(step);
387 if (flag) { goto loop; }
389 /* must be last... */
390 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
395 /* Set the maximum blocks for the oldest generation, based on twice
396 * the amount of live data now, adjusted to fit the maximum heap
399 * This is an approximation, since in the worst case we'll need
400 * twice the amount of live data plus whatever space the other
403 if (RtsFlags.GcFlags.generations > 1) {
405 oldest_gen->max_blocks =
406 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
407 RtsFlags.GcFlags.minOldGenSize);
408 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
409 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
410 if (((int)oldest_gen->max_blocks -
411 (int)oldest_gen->steps[0].to_blocks) <
412 (RtsFlags.GcFlags.pcFreeHeap *
413 RtsFlags.GcFlags.maxHeapSize / 200)) {
419 /* For a two-space collector, we need to resize the nursery. */
421 /* set up a new nursery. Allocate a nursery size based on a
422 * function of the amount of live data (currently a factor of 2,
423 * should be configurable (ToDo)). Use the blocks from the old
424 * nursery if possible, freeing up any left over blocks.
426 * If we get near the maximum heap size, then adjust our nursery
427 * size accordingly. If the nursery is the same size as the live
428 * data (L), then we need 3L bytes. We can reduce the size of the
429 * nursery to bring the required memory down near 2L bytes.
431 * A normal 2-space collector would need 4L bytes to give the same
432 * performance we get from 3L bytes, reducing to the same
433 * performance at 2L bytes.
435 nat blocks = g0s0->to_blocks;
437 if ( blocks * 4 > RtsFlags.GcFlags.maxHeapSize ) {
438 int adjusted_blocks; /* signed on purpose */
441 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
442 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
443 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
444 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
447 blocks = adjusted_blocks;
451 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
452 blocks = RtsFlags.GcFlags.minAllocAreaSize;
456 if (nursery_blocks < blocks) {
457 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
459 g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
463 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
465 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
473 g0s0->n_blocks = nursery_blocks = blocks;
476 /* run through all the generations/steps and tidy up
478 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
481 generations[g].collections++; /* for stats */
484 for (s = 0; s < generations[g].n_steps; s++) {
486 step = &generations[g].steps[s];
488 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
489 /* Tidy the end of the to-space chains */
490 step->hp_bd->free = step->hp;
491 step->hp_bd->link = NULL;
494 /* for generations we collected... */
497 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
499 /* free old memory and shift to-space into from-space for all
500 * the collected steps (except the allocation area). These
501 * freed blocks will probaby be quickly recycled.
503 if (!(g == 0 && s == 0)) {
504 freeChain(step->blocks);
505 step->blocks = step->to_space;
506 step->n_blocks = step->to_blocks;
507 step->to_space = NULL;
509 for (bd = step->blocks; bd != NULL; bd = bd->link) {
510 bd->evacuated = 0; /* now from-space */
514 /* LARGE OBJECTS. The current live large objects are chained on
515 * scavenged_large, having been moved during garbage
516 * collection from large_objects. Any objects left on
517 * large_objects list are therefore dead, so we free them here.
519 for (bd = step->large_objects; bd != NULL; bd = next) {
524 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
527 step->large_objects = step->scavenged_large_objects;
529 /* Set the maximum blocks for this generation, interpolating
530 * between the maximum size of the oldest and youngest
533 * max_blocks = alloc_area_size +
534 * (oldgen_max_blocks - alloc_area_size) * G
535 * -----------------------------------------
539 generations[g].max_blocks =
540 RtsFlags.GcFlags.minAllocAreaSize +
541 (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
542 / (RtsFlags.GcFlags.generations-1));
545 /* for older generations... */
548 /* For older generations, we need to append the
549 * scavenged_large_object list (i.e. large objects that have been
550 * promoted during this GC) to the large_object list for that step.
552 for (bd = step->scavenged_large_objects; bd; bd = next) {
555 dbl_link_onto(bd, &step->large_objects);
558 /* add the new blocks we promoted during this GC */
559 step->n_blocks += step->to_blocks;
564 /* Two-space collector:
565 * Free the old to-space, and estimate the amount of live data.
567 if (RtsFlags.GcFlags.generations == 1) {
568 if (old_to_space != NULL) {
569 freeChain(old_to_space);
571 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
572 bd->evacuated = 0; /* now from-space */
574 live = g0s0->to_blocks * BLOCK_SIZE_W +
575 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
577 /* Generational collector:
578 * estimate the amount of live data.
582 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
583 for (s = 0; s < generations[g].n_steps; s++) {
584 /* approximate amount of live data (doesn't take into account slop
585 * at end of each block). ToDo: this more accurately.
587 if (g == 0 && s == 0) { continue; }
588 step = &generations[g].steps[s];
589 live += step->n_blocks * BLOCK_SIZE_W +
590 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
595 /* revert dead CAFs and update enteredCAFs list */
598 /* mark the garbage collected CAFs as dead */
600 if (major_gc) { gcCAFs(); }
603 /* zero the scavenged static object list */
605 zeroStaticObjectList(scavenged_static_objects);
610 for (bd = g0s0->blocks; bd; bd = bd->link) {
611 bd->free = bd->start;
612 ASSERT(bd->gen == g0);
613 ASSERT(bd->step == g0s0);
615 current_nursery = g0s0->blocks;
617 /* Free the small objects allocated via allocate(), since this will
618 * all have been copied into G0S1 now.
620 if (small_alloc_list != NULL) {
621 freeChain(small_alloc_list);
623 small_alloc_list = NULL;
625 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
627 /* start any pending finalisers */
628 scheduleFinalisers(old_weak_ptr_list);
630 /* check sanity after GC */
632 if (RtsFlags.GcFlags.generations == 1) {
633 IF_DEBUG(sanity, checkHeap(g0s0->to_space, NULL));
634 IF_DEBUG(sanity, checkChain(g0s0->large_objects));
637 for (g = 0; g <= N; g++) {
638 for (s = 0; s < generations[g].n_steps; s++) {
639 if (g == 0 && s == 0) { continue; }
640 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
643 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
644 for (s = 0; s < generations[g].n_steps; s++) {
645 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
646 generations[g].steps[s].blocks->start));
647 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
650 IF_DEBUG(sanity, checkFreeListSanity());
654 IF_DEBUG(gc, stat_describe_gens());
657 /* symbol-table based profiling */
658 /* heapCensus(to_space); */ /* ToDo */
661 /* restore enclosing cost centre */
666 /* check for memory leaks if sanity checking is on */
667 IF_DEBUG(sanity, memInventory());
669 /* ok, GC over: tell the stats department what happened. */
670 stat_endGC(allocated, collected, live, N);
673 /* -----------------------------------------------------------------------------
676 traverse_weak_ptr_list is called possibly many times during garbage
677 collection. It returns a flag indicating whether it did any work
678 (i.e. called evacuate on any live pointers).
680 Invariant: traverse_weak_ptr_list is called when the heap is in an
681 idempotent state. That means that there are no pending
682 evacuate/scavenge operations. This invariant helps the weak
683 pointer code decide which weak pointers are dead - if there are no
684 new live weak pointers, then all the currently unreachable ones are
687 For generational GC: we just don't try to finalise weak pointers in
688 older generations than the one we're collecting. This could
689 probably be optimised by keeping per-generation lists of weak
690 pointers, but for a few weak pointers this scheme will work.
691 -------------------------------------------------------------------------- */
694 traverse_weak_ptr_list(void)
696 StgWeak *w, **last_w, *next_w;
698 const StgInfoTable *info;
699 rtsBool flag = rtsFalse;
701 if (weak_done) { return rtsFalse; }
703 /* doesn't matter where we evacuate values/finalisers to, since
704 * these pointers are treated as roots (iff the keys are alive).
708 last_w = &old_weak_ptr_list;
709 for (w = old_weak_ptr_list; w; w = next_w) {
712 /* ignore weak pointers in older generations */
713 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
714 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
715 /* remove this weak ptr from the old_weak_ptr list */
717 /* and put it on the new weak ptr list */
719 w->link = weak_ptr_list;
725 info = get_itbl(target);
726 switch (info->type) {
731 case IND_OLDGEN: /* rely on compatible layout with StgInd */
732 case IND_OLDGEN_PERM:
733 /* follow indirections */
734 target = ((StgInd *)target)->indirectee;
738 /* If key is alive, evacuate value and finaliser and
739 * place weak ptr on new weak ptr list.
741 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
742 w->key = ((StgEvacuated *)target)->evacuee;
743 w->value = evacuate(w->value);
744 w->finaliser = evacuate(w->finaliser);
746 /* remove this weak ptr from the old_weak_ptr list */
749 /* and put it on the new weak ptr list */
751 w->link = weak_ptr_list;
756 default: /* key is dead */
763 /* If we didn't make any changes, then we can go round and kill all
764 * the dead weak pointers. The old_weak_ptr list is used as a list
765 * of pending finalisers later on.
767 if (flag == rtsFalse) {
768 for (w = old_weak_ptr_list; w; w = w->link) {
769 w->value = evacuate(w->value);
770 w->finaliser = evacuate(w->finaliser);
779 MarkRoot(StgClosure *root)
781 root = evacuate(root);
785 static inline void addBlock(step *step)
787 bdescr *bd = allocBlock();
791 if (step->gen->no <= N) {
797 step->hp_bd->free = step->hp;
798 step->hp_bd->link = bd;
799 step->hp = bd->start;
800 step->hpLim = step->hp + BLOCK_SIZE_W;
805 static __inline__ StgClosure *
806 copy(StgClosure *src, nat size, bdescr *bd)
811 /* Find out where we're going, using the handy "to" pointer in
812 * the step of the source object. If it turns out we need to
813 * evacuate to an older generation, adjust it here (see comment
817 if (step->gen->no < evac_gen) {
818 step = &generations[evac_gen].steps[0];
821 /* chain a new block onto the to-space for the destination step if
824 if (step->hp + size >= step->hpLim) {
830 for(to = dest, from = (P_)src; size>0; --size) {
833 return (StgClosure *)dest;
836 /* Special version of copy() for when we only want to copy the info
837 * pointer of an object, but reserve some padding after it. This is
838 * used to optimise evacuation of BLACKHOLEs.
841 static __inline__ StgClosure *
842 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
848 if (step->gen->no < evac_gen) {
849 step = &generations[evac_gen].steps[0];
852 if (step->hp + size_to_reserve >= step->hpLim) {
857 step->hp += size_to_reserve;
858 for(to = dest, from = (P_)src; size_to_copy>0; --size_to_copy) {
862 return (StgClosure *)dest;
865 static __inline__ void
866 upd_evacuee(StgClosure *p, StgClosure *dest)
868 StgEvacuated *q = (StgEvacuated *)p;
870 SET_INFO(q,&EVACUATED_info);
874 /* -----------------------------------------------------------------------------
875 Evacuate a mutable object
877 If we evacuate a mutable object to an old generation, cons the
878 object onto the older generation's mutable list.
879 -------------------------------------------------------------------------- */
882 evacuate_mutable(StgMutClosure *c)
887 if (bd->gen->no > 0) {
888 c->mut_link = bd->gen->mut_list;
889 bd->gen->mut_list = c;
893 /* -----------------------------------------------------------------------------
894 Evacuate a large object
896 This just consists of removing the object from the (doubly-linked)
897 large_alloc_list, and linking it on to the (singly-linked)
898 new_large_objects list, from where it will be scavenged later.
900 Convention: bd->evacuated is /= 0 for a large object that has been
901 evacuated, or 0 otherwise.
902 -------------------------------------------------------------------------- */
905 evacuate_large(StgPtr p, rtsBool mutable)
907 bdescr *bd = Bdescr(p);
910 /* should point to the beginning of the block */
911 ASSERT(((W_)p & BLOCK_MASK) == 0);
913 /* already evacuated? */
915 /* Don't forget to set the failed_to_evac flag if we didn't get
916 * the desired destination (see comments in evacuate()).
918 if (bd->gen->no < evac_gen) {
919 failed_to_evac = rtsTrue;
925 /* remove from large_object list */
927 bd->back->link = bd->link;
928 } else { /* first object in the list */
929 step->large_objects = bd->link;
932 bd->link->back = bd->back;
935 /* link it on to the evacuated large object list of the destination step
938 if (step->gen->no < evac_gen) {
939 step = &generations[evac_gen].steps[0];
944 bd->link = step->new_large_objects;
945 step->new_large_objects = bd;
949 evacuate_mutable((StgMutClosure *)p);
953 /* -----------------------------------------------------------------------------
954 Adding a MUT_CONS to an older generation.
956 This is necessary from time to time when we end up with an
957 old-to-new generation pointer in a non-mutable object. We defer
958 the promotion until the next GC.
959 -------------------------------------------------------------------------- */
962 mkMutCons(StgClosure *ptr, generation *gen)
967 step = &gen->steps[0];
969 /* chain a new block onto the to-space for the destination step if
972 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
976 q = (StgMutVar *)step->hp;
977 step->hp += sizeofW(StgMutVar);
979 SET_HDR(q,&MUT_CONS_info,CCS_GC);
981 evacuate_mutable((StgMutClosure *)q);
983 return (StgClosure *)q;
986 /* -----------------------------------------------------------------------------
989 This is called (eventually) for every live object in the system.
991 The caller to evacuate specifies a desired generation in the
992 evac_gen global variable. The following conditions apply to
993 evacuating an object which resides in generation M when we're
994 collecting up to generation N
998 else evac to step->to
1000 if M < evac_gen evac to evac_gen, step 0
1002 if the object is already evacuated, then we check which generation
1005 if M >= evac_gen do nothing
1006 if M < evac_gen set failed_to_evac flag to indicate that we
1007 didn't manage to evacuate this object into evac_gen.
1009 -------------------------------------------------------------------------- */
1013 evacuate(StgClosure *q)
1017 const StgInfoTable *info;
1020 if (!LOOKS_LIKE_STATIC(q)) {
1022 if (bd->gen->no > N) {
1023 /* Can't evacuate this object, because it's in a generation
1024 * older than the ones we're collecting. Let's hope that it's
1025 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1027 if (bd->gen->no < evac_gen) {
1029 failed_to_evac = rtsTrue;
1035 /* make sure the info pointer is into text space */
1036 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1037 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1040 switch (info -> type) {
1043 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
1049 to = copy(q,sizeW_fromITBL(info),bd);
1051 evacuate_mutable((StgMutClosure *)to);
1058 case IND_OLDGEN_PERM:
1063 to = copy(q,sizeW_fromITBL(info),bd);
1069 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
1074 to = copy(q,BLACKHOLE_sizeW(),bd);
1076 evacuate_mutable((StgMutClosure *)to);
1079 case THUNK_SELECTOR:
1081 const StgInfoTable* selectee_info;
1082 StgClosure* selectee = ((StgSelector*)q)->selectee;
1085 selectee_info = get_itbl(selectee);
1086 switch (selectee_info->type) {
1090 StgNat32 offset = info->layout.selector_offset;
1092 /* check that the size is in range */
1094 (StgNat32)(selectee_info->layout.payload.ptrs +
1095 selectee_info->layout.payload.nptrs));
1097 /* perform the selection! */
1098 q = selectee->payload[offset];
1100 /* if we're already in to-space, there's no need to continue
1101 * with the evacuation, just update the source address with
1102 * a pointer to the (evacuated) constructor field.
1104 if (IS_USER_PTR(q)) {
1105 bdescr *bd = Bdescr((P_)q);
1106 if (bd->evacuated) {
1107 if (bd->gen->no < evac_gen) {
1108 failed_to_evac = rtsTrue;
1114 /* otherwise, carry on and evacuate this constructor field,
1115 * (but not the constructor itself)
1124 case IND_OLDGEN_PERM:
1125 selectee = stgCast(StgInd *,selectee)->indirectee;
1129 selectee = stgCast(StgCAF *,selectee)->value;
1133 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1138 case THUNK_SELECTOR:
1139 /* aargh - do recursively???? */
1144 /* not evaluated yet */
1148 barf("evacuate: THUNK_SELECTOR: strange selectee");
1151 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1157 /* follow chains of indirections, don't evacuate them */
1158 q = ((StgInd*)q)->indirectee;
1161 /* ToDo: optimise STATIC_LINK for known cases.
1162 - FUN_STATIC : payload[0]
1163 - THUNK_STATIC : payload[1]
1164 - IND_STATIC : payload[1]
1168 if (info->srt_len == 0) { /* small optimisation */
1174 /* don't want to evacuate these, but we do want to follow pointers
1175 * from SRTs - see scavenge_static.
1178 /* put the object on the static list, if necessary.
1180 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1181 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1182 static_objects = (StgClosure *)q;
1186 case CONSTR_INTLIKE:
1187 case CONSTR_CHARLIKE:
1188 case CONSTR_NOCAF_STATIC:
1189 /* no need to put these on the static linked list, they don't need
1204 /* shouldn't see these */
1205 barf("evacuate: stack frame\n");
1209 /* these are special - the payload is a copy of a chunk of stack,
1211 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1216 /* Already evacuated, just return the forwarding address.
1217 * HOWEVER: if the requested destination generation (evac_gen) is
1218 * older than the actual generation (because the object was
1219 * already evacuated to a younger generation) then we have to
1220 * set the failed_to_evac flag to indicate that we couldn't
1221 * manage to promote the object to the desired generation.
1223 if (evac_gen > 0) { /* optimisation */
1224 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1225 if (Bdescr((P_)p)->gen->no < evac_gen) {
1226 /* fprintf(stderr,"evac failed!\n");*/
1227 failed_to_evac = rtsTrue;
1230 return ((StgEvacuated*)q)->evacuee;
1235 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1237 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1238 evacuate_large((P_)q, rtsFalse);
1241 /* just copy the block */
1242 to = copy(q,size,bd);
1249 case MUT_ARR_PTRS_FROZEN:
1251 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1253 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1254 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1257 /* just copy the block */
1258 to = copy(q,size,bd);
1260 if (info->type == MUT_ARR_PTRS) {
1261 evacuate_mutable((StgMutClosure *)to);
1269 StgTSO *tso = stgCast(StgTSO *,q);
1270 nat size = tso_sizeW(tso);
1273 /* Large TSOs don't get moved, so no relocation is required.
1275 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1276 evacuate_large((P_)q, rtsTrue);
1279 /* To evacuate a small TSO, we need to relocate the update frame
1283 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1285 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1287 /* relocate the stack pointers... */
1288 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1289 new_tso->sp = (StgPtr)new_tso->sp + diff;
1290 new_tso->splim = (StgPtr)new_tso->splim + diff;
1292 relocate_TSO(tso, new_tso);
1293 upd_evacuee(q,(StgClosure *)new_tso);
1295 evacuate_mutable((StgMutClosure *)new_tso);
1296 return (StgClosure *)new_tso;
1302 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1306 barf("evacuate: strange closure type");
1312 /* -----------------------------------------------------------------------------
1313 relocate_TSO is called just after a TSO has been copied from src to
1314 dest. It adjusts the update frame list for the new location.
1315 -------------------------------------------------------------------------- */
/* NOTE(review): this excerpt is line-sampled; local declarations, case
 * labels and closing braces between the visible statements are not shown.
 */
1318 relocate_TSO(StgTSO *src, StgTSO *dest)
/* word offset between the new and old stack locations */
1325 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
/* walk the chain of activation records on the relocated stack */
1329 while ((P_)su < dest->stack + dest->stack_size) {
1330 switch (get_itbl(su)->type) {
1332 /* GCC actually manages to common up these three cases! */
/* update, catch and seq frames all just need their link field shifted */
1335 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1340 cf = (StgCatchFrame *)su;
1341 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1346 sf = (StgSeqFrame *)su;
1347 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
/* anything else on the frame chain is corruption */
1356 barf("relocate_TSO");
/* Evacuate every closure referenced from an info table's Static
 * Reference Table (SRT).  (Excerpt is line-sampled; the evacuate call in
 * the loop body is not visible here.)
 */
1365 scavenge_srt(const StgInfoTable *info)
1367 StgClosure **srt, **srt_end;
1369 /* evacuate the SRT. If srt_len is zero, then there isn't an
1370 * srt field in the info table. That's ok, because we'll
1371 * never dereference it.
1373 srt = stgCast(StgClosure **,info->srt);
1374 srt_end = srt + info->srt_len;
1375 for (; srt < srt_end; srt++) {
1380 /* -----------------------------------------------------------------------------
1381 Scavenge a given step until there are no more objects in this step
1384 evac_gen is set by the caller to be either zero (for a step in a
1385 generation < N) or G where G is the generation of the step being
1388 We sometimes temporarily change evac_gen back to zero if we're
1389 scavenging a mutable object where early promotion isn't such a good
1391 -------------------------------------------------------------------------- */
/* NOTE(review): line-sampled excerpt — case labels, braces and some
 * declarations between the visible statements are omitted.
 */
1395 scavenge(step *step)
1398 const StgInfoTable *info;
1400 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1405 failed_to_evac = rtsFalse;
1407 /* scavenge phase - standard breadth-first scavenging of the
/* loop until the scan pointer catches up with the allocation pointer */
1411 while (bd != step->hp_bd || p < step->hp) {
1413 /* If we're at the end of this block, move on to the next block */
1414 if (bd != step->hp_bd && p == bd->free) {
1420 q = p; /* save ptr to object */
1422 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1423 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
/* dispatch on the closure type; each case evacuates the object's
 * pointer fields and advances p past the object */
1425 info = get_itbl((StgClosure *)p);
1426 switch (info -> type) {
/* byte-code objects: evacuate each constant-table pointer */
1430 StgBCO* bco = stgCast(StgBCO*,p);
1432 for (i = 0; i < bco->n_ptrs; i++) {
1433 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1435 p += bco_sizeW(bco);
1440 /* treat MVars specially, because we don't want to evacuate the
1441 * mut_link field in the middle of the closure.
1444 StgMVar *mvar = ((StgMVar *)p);
1446 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1447 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1448 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1449 p += sizeofW(StgMVar);
1450 evac_gen = saved_evac_gen;
/* generic case: evacuate all pointer words in the payload, then skip
 * the non-pointer words (layout described by the info table) */
1463 case IND_OLDGEN_PERM:
1469 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1470 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1471 (StgClosure *)*p = evacuate((StgClosure *)*p);
1473 p += info->layout.payload.nptrs;
1478 /* ignore MUT_CONSs */
1479 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1481 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1482 evac_gen = saved_evac_gen;
1484 p += sizeofW(StgMutVar);
1489 p += BLACKHOLE_sizeW();
/* black hole with blocked threads: evacuate the queue, and put the
 * closure back on the mutable list if anything failed to promote */
1494 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1495 (StgClosure *)bh->blocking_queue =
1496 evacuate((StgClosure *)bh->blocking_queue);
1497 if (failed_to_evac) {
1498 failed_to_evac = rtsFalse;
1499 evacuate_mutable((StgMutClosure *)bh);
1501 p += BLACKHOLE_sizeW();
1505 case THUNK_SELECTOR:
1507 StgSelector *s = (StgSelector *)p;
1508 s->selectee = evacuate(s->selectee);
1509 p += THUNK_SELECTOR_sizeW();
1515 barf("scavenge:IND???\n");
1517 case CONSTR_INTLIKE:
1518 case CONSTR_CHARLIKE:
1520 case CONSTR_NOCAF_STATIC:
1524 /* Shouldn't see a static object here. */
1525 barf("scavenge: STATIC object\n");
1537 /* Shouldn't see stack frames here. */
1538 barf("scavenge: stack frame\n");
1540 case AP_UPD: /* same as PAPs */
1542 /* Treat a PAP just like a section of stack, not forgetting to
1543 * evacuate the function pointer too...
1546 StgPAP* pap = stgCast(StgPAP*,p);
1548 pap->fun = evacuate(pap->fun);
1549 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1550 p += pap_sizeW(pap);
1556 /* nothing to follow */
1557 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1561 /* follow everything */
1565 evac_gen = 0; /* repeatedly mutable */
1566 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1567 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1568 (StgClosure *)*p = evacuate((StgClosure *)*p);
1570 evac_gen = saved_evac_gen;
1574 case MUT_ARR_PTRS_FROZEN:
1575 /* follow everything */
1577 StgPtr start = p, next;
1579 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1580 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1581 (StgClosure *)*p = evacuate((StgClosure *)*p);
1583 if (failed_to_evac) {
1584 /* we can do this easier... */
1585 evacuate_mutable((StgMutClosure *)start);
1586 failed_to_evac = rtsFalse;
1597 /* chase the link field for any TSOs on the same queue */
1598 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1599 /* scavenge this thread's stack */
1600 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1601 evac_gen = saved_evac_gen;
1602 p += tso_sizeW(tso);
1609 barf("scavenge: unimplemented/strange closure type\n");
1615 /* If we didn't manage to promote all the objects pointed to by
1616 * the current object, then we have to designate this object as
1617 * mutable (because it contains old-to-new generation pointers).
1619 if (failed_to_evac) {
1620 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1621 failed_to_evac = rtsFalse;
1629 /* -----------------------------------------------------------------------------
1630 Scavenge one object.
1632 This is used for objects that are temporarily marked as mutable
1633 because they contain old-to-new generation pointers. Only certain
1634 objects can have this property.
1635 -------------------------------------------------------------------------- */
/* Returns the saved failed_to_evac flag (no_luck): rtsTrue means not every
 * pointee could be promoted, so the caller must keep the object on the
 * mutable list.  (Line-sampled excerpt: some case labels/braces omitted.)
 */
1637 scavenge_one(StgPtr p)
1642 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1643 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1645 info = get_itbl((StgClosure *)p);
1647 switch (info -> type) {
/* plain closures: evacuate each pointer word in the payload */
1655 case IND_OLDGEN_PERM:
1661 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1662 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1663 (StgClosure *)*p = evacuate((StgClosure *)*p);
1672 case THUNK_SELECTOR:
1674 StgSelector *s = (StgSelector *)p;
1675 s->selectee = evacuate(s->selectee);
1679 case AP_UPD: /* same as PAPs */
1681 /* Treat a PAP just like a section of stack, not forgetting to
1682 * evacuate the function pointer too...
1685 StgPAP* pap = stgCast(StgPAP*,p);
1687 pap->fun = evacuate(pap->fun);
1688 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1693 /* This might happen if for instance a MUT_CONS was pointing to a
1694 * THUNK which has since been updated. The IND_OLDGEN will
1695 * be on the mutable list anyway, so we don't need to do anything
1701 barf("scavenge_one: strange object");
/* capture and reset the global flag before returning it */
1704 no_luck = failed_to_evac;
1705 failed_to_evac = rtsFalse;
1710 /* -----------------------------------------------------------------------------
1711 Scavenging mutable lists.
1713 We treat the mutable list of each generation > N (i.e. all the
1714 generations older than the one being collected) as roots. We also
1715 remove non-mutable objects from the mutable list at this point.
1716 -------------------------------------------------------------------------- */
/* NOTE(review): line-sampled excerpt — case labels and braces between the
 * visible statements are omitted.  `prev` tracks the address of the link
 * we came through, so `*prev = p->mut_link` unlinks the current object.
 */
1718 static StgMutClosure *
1719 scavenge_mutable_list(StgMutClosure *p, nat gen)
1722 StgMutClosure *start;
1723 StgMutClosure **prev;
1730 failed_to_evac = rtsFalse;
1732 for (; p != END_MUT_LIST; p = *prev) {
1734 /* make sure the info pointer is into text space */
1735 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1736 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1739 switch(info->type) {
1741 case MUT_ARR_PTRS_FROZEN:
1742 /* remove this guy from the mutable list, but follow the ptrs
1743 * anyway (and make sure they get promoted to this gen).
1748 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1750 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1751 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* only unlink if everything was promoted successfully */
1755 if (failed_to_evac) {
1756 failed_to_evac = rtsFalse;
1757 prev = &p->mut_link;
1759 *prev = p->mut_link;
1765 /* follow everything */
1766 prev = &p->mut_link;
1770 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1771 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1772 (StgClosure *)*q = evacuate((StgClosure *)*q);
1778 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1779 * it from the mutable list if possible by promoting whatever it
1782 if (p->header.info == &MUT_CONS_info) {
1784 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1785 /* didn't manage to promote everything, so leave the
1786 * MUT_CONS on the list.
1788 prev = &p->mut_link;
1790 *prev = p->mut_link;
/* a real MUT_VAR stays mutable: keep it on the list */
1794 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1795 prev = &p->mut_link;
1801 StgMVar *mvar = (StgMVar *)p;
1802 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1803 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1804 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1805 prev = &p->mut_link;
1810 /* follow ptrs and remove this from the mutable list */
1812 StgTSO *tso = (StgTSO *)p;
1814 /* Don't bother scavenging if this thread is dead
1816 if (!(tso->whatNext == ThreadComplete ||
1817 tso->whatNext == ThreadKilled)) {
1818 /* Don't need to chase the link field for any TSOs on the
1819 * same queue. Just scavenge this thread's stack
1821 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1824 /* Don't take this TSO off the mutable list - it might still
1825 * point to some younger objects (because we set evac_gen to 0
1828 prev = &tso->mut_link;
1833 case IND_OLDGEN_PERM:
1835 /* Try to pull the indirectee into this generation, so we can
1836 * remove the indirection from the mutable list.
1839 ((StgIndOldGen *)p)->indirectee =
1840 evacuate(((StgIndOldGen *)p)->indirectee);
1843 if (failed_to_evac) {
1844 failed_to_evac = rtsFalse;
1845 prev = &p->mut_link;
1847 *prev = p->mut_link;
1848 /* the mut_link field of an IND_STATIC is overloaded as the
1849 * static link field too (it just so happens that we don't need
1850 * both at the same time), so we need to NULL it out when
1851 * removing this object from the mutable list because the static
1852 * link fields are all assumed to be NULL before doing a major
1861 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1862 (StgClosure *)bh->blocking_queue =
1863 evacuate((StgClosure *)bh->blocking_queue);
1864 prev = &p->mut_link;
1869 /* shouldn't have anything else on the mutables list */
1870 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the static-object list built up during evacuation, moving each
 * object onto scavenged_static_objects as it is processed.  New objects
 * may be appended to static_objects while we scan.  (Line-sampled
 * excerpt: loop braces and some case labels are not visible.)
 */
1877 scavenge_static(void)
1879 StgClosure* p = static_objects;
1880 const StgInfoTable *info;
1882 /* Always evacuate straight to the oldest generation for static
1884 evac_gen = oldest_gen->no;
1886 /* keep going until we've scavenged all the objects on the linked
1888 while (p != END_OF_STATIC_LIST) {
1892 /* make sure the info pointer is into text space */
1893 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1894 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1896 /* Take this object *off* the static_objects list,
1897 * and put it on the scavenged_static_objects list.
1899 static_objects = STATIC_LINK(info,p);
1900 STATIC_LINK(info,p) = scavenged_static_objects;
1901 scavenged_static_objects = p;
1903 switch (info -> type) {
1907 StgInd *ind = (StgInd *)p;
1908 ind->indirectee = evacuate(ind->indirectee);
1910 /* might fail to evacuate it, in which case we have to pop it
1911 * back on the mutable list (and take it off the
1912 * scavenged_static list because the static link and mut link
1913 * pointers are one and the same).
1915 if (failed_to_evac) {
1916 failed_to_evac = rtsFalse;
1917 scavenged_static_objects = STATIC_LINK(info,p);
1918 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1919 oldest_gen->mut_list = (StgMutClosure *)ind;
/* constructors etc.: evacuate every pointer in the payload */
1933 next = (P_)p->payload + info->layout.payload.ptrs;
1934 /* evacuate the pointers */
1935 for (q = (P_)p->payload; q < next; q++) {
1936 (StgClosure *)*q = evacuate((StgClosure *)*q);
1942 barf("scavenge_static");
1945 ASSERT(failed_to_evac == rtsFalse);
1947 /* get the next static object from the list. Remember, there might
1948 * be more stuff on this list now that we've done some evacuating!
1949 * (static_objects is a global)
1955 /* -----------------------------------------------------------------------------
1956 scavenge_stack walks over a section of stack and evacuates all the
1957 objects pointed to by it. We can use the same code for walking
1958 PAPs, since these are just sections of copied stack.
1959 -------------------------------------------------------------------------- */
/* NOTE(review): line-sampled excerpt — braces, increments and some case
 * labels between the visible statements are omitted.
 */
1962 scavenge_stack(StgPtr p, StgPtr stack_end)
1965 const StgInfoTable* info;
1969 * Each time around this loop, we are looking at a chunk of stack
1970 * that starts with either a pending argument section or an
1971 * activation record.
1974 while (p < stack_end) {
1975 q = *stgCast(StgPtr*,p);
1977 /* If we've got a tag, skip over that many words on the stack */
1978 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1983 /* Is q a pointer to a closure?
1985 if (! LOOKS_LIKE_GHC_INFO(q)) {
1988 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1989 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1991 /* otherwise, must be a pointer into the allocation space.
/* plain heap pointer on the stack: evacuate it in place */
1995 (StgClosure *)*p = evacuate((StgClosure *)q);
2001 * Otherwise, q must be the info pointer of an activation
2002 * record. All activation records have 'bitmap' style layout
2005 info = get_itbl(stgCast(StgClosure*,p));
2007 switch (info->type) {
2009 /* Dynamic bitmap: the mask is stored on the stack */
2011 bitmap = stgCast(StgRetDyn*,p)->liveness;
2012 p = &payloadWord(stgCast(StgRetDyn*,p),0);
2015 /* probably a slow-entry point return address: */
2021 /* Specialised code for update frames, since they're so common.
2022 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2023 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2027 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2029 StgClosureType type = get_itbl(frame->updatee)->type;
2031 p += sizeofW(StgUpdateFrame);
2032 if (type == EVACUATED) {
2033 frame->updatee = evacuate(frame->updatee);
/* not yet evacuated: decide by generation whether to move it */
2036 bdescr *bd = Bdescr((P_)frame->updatee);
2037 if (bd->gen->no > N) {
2038 if (bd->gen->no < evac_gen) {
2039 failed_to_evac = rtsTrue;
2046 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2047 sizeofW(StgHeader), bd);
2048 upd_evacuee(frame->updatee,to);
2049 frame->updatee = to;
2052 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
2053 upd_evacuee(frame->updatee,to);
2054 frame->updatee = to;
2055 evacuate_mutable((StgMutClosure *)to);
2058 barf("scavenge_stack: UPDATE_FRAME updatee");
2063 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2070 bitmap = info->layout.bitmap;
/* bit clear => this stack word is a live pointer */
2073 while (bitmap != 0) {
2074 if ((bitmap & 1) == 0) {
2075 (StgClosure *)*p = evacuate((StgClosure *)*p);
2078 bitmap = bitmap >> 1;
2085 /* large bitmap (> 32 entries) */
2090 StgLargeBitmap *large_bitmap;
2093 large_bitmap = info->layout.large_bitmap;
2096 for (i=0; i<large_bitmap->size; i++) {
2097 bitmap = large_bitmap->bitmap[i];
2098 q = p + sizeof(W_) * 8;
2099 while (bitmap != 0) {
2100 if ((bitmap & 1) == 0) {
2101 (StgClosure *)*p = evacuate((StgClosure *)*p);
2104 bitmap = bitmap >> 1;
2106 if (i+1 < large_bitmap->size) {
2108 (StgClosure *)*p = evacuate((StgClosure *)*p);
2114 /* and don't forget to follow the SRT */
2119 barf("scavenge_stack: weird activation record found on stack.\n");
2124 /*-----------------------------------------------------------------------------
2125 scavenge the large object list.
2127 evac_gen set by caller; similar games played with evac_gen as with
2128 scavenge() - see comment at the top of scavenge(). Most large
2129 objects are (repeatedly) mutable, so most of the time evac_gen will
2131 --------------------------------------------------------------------------- */
/* NOTE(review): line-sampled excerpt — some case labels and braces
 * between the visible statements are omitted.
 */
2134 scavenge_large(step *step)
2138 const StgInfoTable* info;
2139 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2141 evac_gen = 0; /* most objects are mutable */
2142 bd = step->new_large_objects;
/* re-read the list head each iteration: evacuation may push new ones */
2144 for (; bd != NULL; bd = step->new_large_objects) {
2146 /* take this object *off* the large objects list and put it on
2147 * the scavenged large objects list. This is so that we can
2148 * treat new_large_objects as a stack and push new objects on
2149 * the front when evacuating.
2151 step->new_large_objects = bd->link;
2152 dbl_link_onto(bd, &step->scavenged_large_objects);
2155 info = get_itbl(stgCast(StgClosure*,p));
2157 switch (info->type) {
2159 /* only certain objects can be "large"... */
2163 /* nothing to follow */
2167 /* follow everything */
2171 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2172 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2173 (StgClosure *)*p = evacuate((StgClosure *)*p);
2178 case MUT_ARR_PTRS_FROZEN:
2179 /* follow everything */
2181 StgPtr start = p, next;
2183 evac_gen = saved_evac_gen; /* not really mutable */
2184 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2185 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2186 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* frozen array pointed at younger objects: must stay mutable */
2189 if (failed_to_evac) {
2190 evacuate_mutable((StgMutClosure *)start);
2197 StgBCO* bco = stgCast(StgBCO*,p);
2199 evac_gen = saved_evac_gen;
2200 for (i = 0; i < bco->n_ptrs; i++) {
2201 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2212 /* chase the link field for any TSOs on the same queue */
2213 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2214 /* scavenge this thread's stack */
2215 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2220 barf("scavenge_large: unknown/strange object");
/* Zero the static link field of every object on the given static list, so
 * the "is it on the list?" test (non-NULL link) works for the next GC.
 */
2226 zeroStaticObjectList(StgClosure* first_static)
2230 const StgInfoTable *info;
2232 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
/* grab the link before clearing it */
2234 link = STATIC_LINK(info, p);
2235 STATIC_LINK(info,p) = NULL;
2239 /* This function is only needed because we share the mutable link
2240 * field with the static link field in an IND_STATIC, so we have to
2241 * zero the mut_link field before doing a major GC, which needs the
2242 * static link field.
2244 * It doesn't do any harm to zero all the mutable link fields on the
2248 zeroMutableList(StgMutClosure *first)
2250 StgMutClosure *next, *c;
/* walk the list, clearing each mut_link (assignment is in a sampled-out line) */
2252 for (c = first; c != END_MUT_LIST; c = next) {
2258 /* -----------------------------------------------------------------------------
2260 -------------------------------------------------------------------------- */
/* Revert every entered CAF back to the unentered state, poisoning its
 * value and link fields with 0xdeadbeef to catch stale uses.
 */
2262 void RevertCAFs(void)
2264 while (enteredCAFs != END_CAF_LIST) {
2265 StgCAF* caf = enteredCAFs;
/* unlink before we clobber caf->link below */
2267 enteredCAFs = caf->link;
2268 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2269 SET_INFO(caf,&CAF_UNENTERED_info);
2270 caf->value = stgCast(StgClosure*,0xdeadbeef);
2271 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* Rebuild enteredCAFs keeping only CAFs that survived GC (i.e. were
 * evacuated); dead ones are reverted to CAF_UNENTERED and poisoned.
 * (Line-sampled excerpt: some case labels/braces are not visible.)
 */
2275 void revertDeadCAFs(void)
2277 StgCAF* caf = enteredCAFs;
2278 enteredCAFs = END_CAF_LIST;
2279 while (caf != END_CAF_LIST) {
2280 StgCAF* next = caf->link;
2282 switch(GET_INFO(caf)->type) {
2285 /* This object has been evacuated, it must be live. */
/* follow the forwarding pointer and re-link the new copy */
2286 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2287 new->link = enteredCAFs;
2293 SET_INFO(caf,&CAF_UNENTERED_info);
2294 caf->value = stgCast(StgClosure*,0xdeadbeef);
2295 caf->link = stgCast(StgCAF*,0xdeadbeef);
2299 barf("revertDeadCAFs: enteredCAFs list corrupted");
2305 /* -----------------------------------------------------------------------------
2306 Sanity code for CAF garbage collection.
2308 With DEBUG turned on, we manage a CAF list in addition to the SRT
2309 mechanism. After GC, we run down the CAF list and blackhole any
2310 CAFs which have been garbage collected. This means we get an error
2311 whenever the program tries to enter a garbage collected CAF.
2313 Any garbage collected CAFs are taken off the CAF list at the same
2315 -------------------------------------------------------------------------- */
/* NOTE(review): the function header for this DEBUG-only routine
 * (presumably gcCAFs) falls in a sampled-out line — confirm against the
 * full source.
 */
2323 const StgInfoTable *info;
2334 ASSERT(info->type == IND_STATIC);
/* a NULL static link means the CAF was not reached during GC */
2336 if (STATIC_LINK(info,p) == NULL) {
2337 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
2339 SET_INFO(p,&BLACKHOLE_info);
2340 p = STATIC_LINK2(info,p);
2344 pp = &STATIC_LINK2(info,p);
2351 /* fprintf(stderr, "%d CAFs live\n", i); */
2355 /* -----------------------------------------------------------------------------
2358 Whenever a thread returns to the scheduler after possibly doing
2359 some work, we have to run down the stack and black-hole all the
2360 closures referred to by update frames.
2361 -------------------------------------------------------------------------- */
/* NOTE(review): line-sampled excerpt — case labels and braces between the
 * visible statements are omitted.
 */
2364 threadLazyBlackHole(StgTSO *tso)
2366 StgUpdateFrame *update_frame;
2367 StgBlockingQueue *bh;
2370 stack_end = &tso->stack[tso->stack_size];
2371 update_frame = tso->su;
/* walk the update-frame chain from the top of the stack downwards */
2374 switch (get_itbl(update_frame)->type) {
2377 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2381 bh = (StgBlockingQueue *)update_frame->updatee;
2383 /* if the thunk is already blackholed, it means we've also
2384 * already blackholed the rest of the thunks on this stack,
2385 * so we can stop early.
2387 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
2388 * don't interfere with this optimisation.
2390 if (bh->header.info == &BLACKHOLE_info) {
/* don't overwrite BLACKHOLE_BQ (would lose the blocked threads)
 * or CAF_BLACKHOLE */
2394 if (bh->header.info != &BLACKHOLE_BQ_info &&
2395 bh->header.info != &CAF_BLACKHOLE_info) {
2396 SET_INFO(bh,&BLACKHOLE_info);
2399 update_frame = update_frame->link;
2403 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2409 barf("threadPaused");
2414 /* -----------------------------------------------------------------------------
2417 * Code largely pinched from old RTS, then hacked to bits. We also do
2418 * lazy black holing here.
2420 * -------------------------------------------------------------------------- */
/* Squeeze out adjacent empty update frames from a thread's stack,
 * sliding the intervening stack words up by `displacement` words, and
 * lazily black-hole the updatees of the frames that remain.
 * (Line-sampled excerpt: some statements between visible lines omitted.)
 */
2423 threadSqueezeStack(StgTSO *tso)
2425 lnat displacement = 0;
2426 StgUpdateFrame *frame;
2427 StgUpdateFrame *next_frame; /* Temporally next */
2428 StgUpdateFrame *prev_frame; /* Temporally previous */
2430 rtsBool prev_was_update_frame;
2432 bottom = &(tso->stack[tso->stack_size]);
2435 /* There must be at least one frame, namely the STOP_FRAME.
2437 ASSERT((P_)frame < bottom);
2439 /* Walk down the stack, reversing the links between frames so that
2440 * we can walk back up as we squeeze from the bottom. Note that
2441 * next_frame and prev_frame refer to next and previous as they were
2442 * added to the stack, rather than the way we see them in this
2443 * walk. (It makes the next loop less confusing.)
2445 * Stop if we find an update frame pointing to a black hole
2446 * (see comment in threadLazyBlackHole()).
2450 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2451 prev_frame = frame->link;
2452 frame->link = next_frame;
2455 if (get_itbl(frame)->type == UPDATE_FRAME
2456 && frame->updatee->header.info == &BLACKHOLE_info) {
2461 /* Now, we're at the bottom. Frame points to the lowest update
2462 * frame on the stack, and its link actually points to the frame
2463 * above. We have to walk back up the stack, squeezing out empty
2464 * update frames and turning the pointers back around on the way
2467 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2468 * we never want to eliminate it anyway. Just walk one step up
2469 * before starting to squeeze. When you get to the topmost frame,
2470 * remember that there are still some words above it that might have
2477 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2480 * Loop through all of the frames (everything except the very
2481 * bottom). Things are complicated by the fact that we have
2482 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2483 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2485 while (frame != NULL) {
2487 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2488 rtsBool is_update_frame;
2490 next_frame = frame->link;
2491 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2494 * 1. both the previous and current frame are update frames
2495 * 2. the current frame is empty
2497 if (prev_was_update_frame && is_update_frame &&
2498 (P_)prev_frame == frame_bottom + displacement) {
2500 /* Now squeeze out the current frame */
2501 StgClosure *updatee_keep = prev_frame->updatee;
2502 StgClosure *updatee_bypass = frame->updatee;
2505 fprintf(stderr, "squeezing frame at %p\n", frame);
2508 /* Deal with blocking queues. If both updatees have blocked
2509 * threads, then we should merge the queues into the update
2510 * frame that we're keeping.
2512 * Alternatively, we could just wake them up: they'll just go
2513 * straight to sleep on the proper blackhole! This is less code
2514 * and probably less bug prone, although it's probably much
2517 #if 0 /* do it properly... */
2518 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2519 /* Sigh. It has one. Don't lose those threads! */
2520 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2521 /* Urgh. Two queues. Merge them. */
2522 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2524 while (keep_tso->link != END_TSO_QUEUE) {
2525 keep_tso = keep_tso->link;
2527 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2530 /* For simplicity, just swap the BQ for the BH */
2531 P_ temp = updatee_keep;
2533 updatee_keep = updatee_bypass;
2534 updatee_bypass = temp;
2536 /* Record the swap in the kept frame (below) */
2537 prev_frame->updatee = updatee_keep;
2542 TICK_UPD_SQUEEZED();
2543 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2545 sp = (P_)frame - 1; /* sp = stuff to slide */
2546 displacement += sizeofW(StgUpdateFrame);
2549 /* No squeeze for this frame */
2550 sp = frame_bottom - 1; /* Keep the current frame */
2552 /* Do lazy black-holing.
2554 if (is_update_frame) {
2555 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
/* same caveat as threadLazyBlackHole: never clobber a
 * BLACKHOLE_BQ or CAF_BLACKHOLE */
2556 if (bh->header.info != &BLACKHOLE_BQ_info &&
2557 bh->header.info != &CAF_BLACKHOLE_info) {
2558 SET_INFO(bh,&BLACKHOLE_info);
2562 /* Fix the link in the current frame (should point to the frame below) */
2563 frame->link = prev_frame;
2564 prev_was_update_frame = is_update_frame;
2567 /* Now slide all words from sp up to the next frame */
2569 if (displacement > 0) {
2570 P_ next_frame_bottom;
2572 if (next_frame != NULL)
2573 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2575 next_frame_bottom = tso->sp - 1;
2578 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
2582 while (sp >= next_frame_bottom) {
2583 sp[displacement] = *sp;
2587 (P_)prev_frame = (P_)frame + displacement;
/* finally adjust the TSO's sp and su for the squeezed stack */
2591 tso->sp += displacement;
2592 tso->su = prev_frame;
2595 /* -----------------------------------------------------------------------------
2598 * We have to prepare for GC - this means doing lazy black holing
2599 * here. We also take the opportunity to do stack squeezing if it's
2601 * -------------------------------------------------------------------------- */
2604 threadPaused(StgTSO *tso)
2606 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2607 threadSqueezeStack(tso); /* does black holing too */
2609 threadLazyBlackHole(tso);