/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.20 1999/01/26 16:16:22 simonm Exp $
 *
 * Two-space garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"           /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "DebugProf.h"
#include "StablePriv.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
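/* A minimal illustrative sketch (not part of the collector) of the list
 * protocol described above, assuming the usual STATIC_LINK accessor:
 * an object is consed onto the front iff its static link field is
 * still NULL, and the end of the list is the distinguished non-NULL
 * value END_OF_STATIC_LIST.
 */
#if 0
static void push_static_example(const StgInfoTable *info, StgClosure *obj)
{
  if (STATIC_LINK(info,obj) == NULL) {  /* not yet on the list */
    STATIC_LINK(info,obj) = static_objects;
    static_objects = obj;               /* cons onto the front */
  }
}
#endif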
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;
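/* Illustrative sketch of how evac_gen and failed_to_evac cooperate
 * (hypothetical caller; see scavenge() and mkMutCons() below for the
 * real uses).  A scavenger that wants an object's pointee promoted to
 * at least generation g sets evac_gen, evacuates, then checks the flag:
 */
#if 0
  evac_gen = g;                     /* desired minimum generation */
  p->payload[i] = evacuate(p->payload[i]);
  if (failed_to_evac) {             /* pointee stayed in a younger gen, */
    failed_to_evac = rtsFalse;      /* so remember the old-to-new ptr   */
    mkMutCons((StgClosure *)p, &generations[evac_gen]);
  }
  evac_gen = 0;
#endif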
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure *evacuate(StgClosure *q);
static void zeroStaticObjectList(StgClosure* first_static);
static rtsBool traverse_weak_ptr_list(void);
static void zeroMutableList(StgMutClosure *first);
static void revertDeadCAFs(void);
static void scavenge_stack(StgPtr p, StgPtr stack_end);
static void scavenge_large(step *step);
static void scavenge(step *step);
static void scavenge_static(void);
static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
static void gcCAFs(void);
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
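/* Illustrative only: a hypothetical caller supplies a get_roots
 * callback that evacuates each root it knows about via MarkRoot.
 * The queue name below is made up for the sketch:
 */
#if 0
extern StgTSO *example_run_queue;   /* hypothetical root */

static void example_get_roots(void)
{
  example_run_queue = (StgTSO *)MarkRoot((StgClosure *)example_run_queue);
}
/* ... GarbageCollect(example_get_roots); ... */
#endif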
void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0;
  nat g, s;
  CostCentreStack *prev_CCS;

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }

  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }
  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  /* IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zeroMutableList below).
   */
  zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;      /* ???? */
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    StgMutClosure *tmp, **pp;
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
      pp = &generations[g].mut_list;
      while (*pp != END_MUT_LIST) {
        pp = &(*pp)->mut_link;
      }
      *pp = tmp;
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   */
  extern void markHugsObjects(void);

  /* ToDo: This (undefined) function should contain the scavenge
   * loop immediately below this block of code - but I'm not sure
   * enough of the details to do this myself.
   */
  scavengeEverything();

  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();

  /* This will keep the CAFs and the attached BCOs alive
   * but the values will have been reverted.
   */
  scavengeEverything();
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (s = 0; s < generations[gen].n_steps; s++) {
      step = &generations[gen].steps[s];
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
        scavenge(step);
        flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
        scavenge_large(step);
        flag = rtsTrue;
      }
    }
  }
  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
              RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
           (int)oldest_gen->steps[0].to_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
        heapOverflow();
      }
    }
  }
  /* For a two-space collector, we need to resize the nursery. */

  /* set up a new nursery.  Allocate a nursery size based on a
   * function of the amount of live data (currently a factor of 2,
   * should be configurable (ToDo)).  Use the blocks from the old
   * nursery if possible, freeing up any left over blocks.
   *
   * If we get near the maximum heap size, then adjust our nursery
   * size accordingly.  If the nursery is the same size as the live
   * data (L), then we need 3L bytes.  We can reduce the size of the
   * nursery to bring the required memory down near 2L bytes.
   *
   * A normal 2-space collector would need 4L bytes to give the same
   * performance we get from 3L bytes, reducing to the same
   * performance at 2L bytes.
   */
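  /* Worked example (illustrative numbers only): with live data L = 100
   * blocks and a nursery of L blocks, the peak requirement here is
   * nursery + from-space + to-space = 3L = 300 blocks.  A conventional
   * 2-space collector gets L blocks of allocation per GC only if each
   * semispace holds live data + allocation area = 2L, i.e. 4L in total.
   * Shrinking our nursery moves the requirement down towards 2L.
   */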
  {
    nat blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }

    if (nursery_blocks < blocks) {
      IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
                           blocks));
      g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
    } else {
      IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
                           blocks));
      for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {

    g0s0->n_blocks = nursery_blocks = blocks;
  }
  /* run through all the generations/steps and tidy up
   */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =  alloc_area_size +
         *                  (oldgen_max_blocks - alloc_area_size) * G
         *                  -----------------------------------------
         *                        (number of generations - 1)
         *
         * where G is this generation's number.
         */
        generations[g].max_blocks =
          RtsFlags.GcFlags.minAllocAreaSize +
          (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
           / (RtsFlags.GcFlags.generations-1));
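        /* Worked example (illustrative numbers only): with
         * minAllocAreaSize = 256 blocks, an oldest-gen max_blocks of
         * 2048 and 3 generations, generation 1 gets
         * 256 + (2048 - 256) * 1 / 2 = 1152 blocks and generation 2
         * (the oldest) gets 256 + 1792 * 2 / 2 = 2048 blocks.
         */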
      } else {
        /* for older generations... */

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }
    live = g0s0->to_blocks * BLOCK_SIZE_W +
      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

  /* Generational collector:
   * estimate the amount of live data.
   */
  } else {
    live = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        /* approximate amount of live data (doesn't take into account slop
         * at end of each block).  ToDo: do this more accurately.
         */
        if (g == 0 && s == 0) { continue; }
        step = &generations[g].steps[s];
        live += step->n_blocks * BLOCK_SIZE_W +
          ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
      }
    }
  }
  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();

  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zeroStaticObjectList(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
  }
  current_nursery = g0s0->blocks;

  /* Free the small objects allocated via allocate(), since these will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* start any pending finalisers */
  scheduleFinalisers(old_weak_ptr_list);
  /* check sanity after GC */
  if (RtsFlags.GcFlags.generations == 1) {
    IF_DEBUG(sanity, checkHeap(g0s0->to_space, NULL));
    IF_DEBUG(sanity, checkChain(g0s0->large_objects));
  } else {
    for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        if (g == 0 && s == 0) { continue; }
        IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
      }
    }
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
                                   generations[g].steps[s].blocks->start));
        IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
      }
    }
  }
  IF_DEBUG(sanity, checkFreeListSanity());

  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */   /* ToDo */

  /* restore enclosing cost centre */

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalise weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalisers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finaliser */
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalisers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);
    }
    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

 loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  switch (info->type) {

  case IND:
  case IND_OLDGEN:              /* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  default:
    /* dead. */
    return NULL;
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }
  step->hp = to;
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }
  step->hp += size_to_reserve;
  return (StgClosure *)dest;
}

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  StgEvacuated *q = (StgEvacuated *)p;

  SET_INFO(q,&EVACUATED_info);
  q->evacuee = dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a mutable object

   If we evacuate a mutable object to an old generation, cons the
   object onto the older generation's mutable list.
   -------------------------------------------------------------------------- */

static void
evacuate_mutable(StgMutClosure *c)
{
  bdescr *bd;

  bd = Bdescr((StgPtr)c);
  if (bd->gen->no > 0) {
    c->mut_link = bd->gen->mut_list;
    bd->gen->mut_list = c;
  }
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated?
   *
   * Don't forget to set the failed_to_evac flag if we didn't get
   * the desired destination (see comments in evacuate()).
   */
  if (bd->evacuated) {
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  bd->evacuated = 1;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;

  if (mutable) {
    evacuate_mutable((StgMutClosure *)p);
  }
}
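/* dbl_link_onto() (defined elsewhere in the storage manager) implements
 * the doubly-linked convention used for the large_objects lists above.
 * A sketch of what it presumably does, for reference - this is an
 * assumption, not the actual definition:
 */
#if 0
static void dbl_link_onto_sketch(bdescr *bd, bdescr **list)
{
  bd->link = *list;             /* push on the front */
  bd->back = NULL;
  if (*list != NULL) {
    (*list)->back = bd;         /* fix up the old head's back pointer */
  }
  *list = bd;
}
#endif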
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  evacuate_mutable((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   evacuate()

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case BCO:
    to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);

    to = copy(q,sizeW_fromITBL(info),step);
    evacuate_mutable((StgMutClosure *)to);

  case STABLE_NAME:
    stable_ptr_table[((StgStableName *)q)->sn].keep = rtsTrue;
    to = copy(q,sizeofW(StgStableName),step);

    to = copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
    to = copy(q,sizeofW(StgHeader)+2,step);

  case IND_OLDGEN_PERM:
    to = copy(q,sizeW_fromITBL(info),step);

    to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

    to = copy(q,BLACKHOLE_sizeW(),step);
    evacuate_mutable((StgMutClosure *)to);
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
        {
          StgNat32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgNat32)(selectee_info->layout.payload.ptrs +
                            selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (IS_USER_PTR(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK_SELECTOR:
        /* aargh - do recursively???? */

        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee");
      }
      to = copy(q,THUNK_SELECTOR_sizeW(),step);
    }

    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

    /* ToDo: optimise STATIC_LINK for known cases.
     *  - FUN_STATIC   : payload[0]
     *  - THUNK_STATIC : payload[1]
     *  - IND_STATIC   : payload[1]
     */
    if (info->srt_len == 0) {   /* small optimisation */
      return q;
    }

    /* don't want to evacuate these, but we do want to follow pointers
     * from SRTs - see scavenge_static.
     */

    /* put the object on the static list, if necessary.
     */
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
1271 barf("evacuate: stack frame\n");
1275 /* these are special - the payload is a copy of a chunk of stack,
1277 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1282 /* Already evacuated, just return the forwarding address.
1283 * HOWEVER: if the requested destination generation (evac_gen) is
1284 * older than the actual generation (because the object was
1285 * already evacuated to a younger generation) then we have to
1286 * set the failed_to_evac flag to indicate that we couldn't
1287 * manage to promote the object to the desired generation.
1289 if (evac_gen > 0) { /* optimisation */
1290 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1291 if (Bdescr((P_)p)->gen->no < evac_gen) {
1292 /* fprintf(stderr,"evac failed!\n");*/
1293 failed_to_evac = rtsTrue;
1294 TICK_GC_FAILED_PROMOTION();
1297 return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        return to;
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          evacuate_mutable((StgMutClosure *)to);
        }
        return to;
      }
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      } else {
        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);
        upd_evacuee(q,(StgClosure *)new_tso);

        evacuate_mutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }

  default:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
    barf("evacuate: strange closure type");
  }
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;
  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    /* GCC actually manages to common up these three cases! */

      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);

      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);

      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);

    default:
      barf("relocate_TSO");
    }
  }
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info -> type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

    case IND_OLDGEN_PERM:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;
    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          evacuate_mutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }
1631 barf("scavenge:IND???\n");
1633 case CONSTR_INTLIKE:
1634 case CONSTR_CHARLIKE:
1636 case CONSTR_NOCAF_STATIC:
1640 /* Shouldn't see a static object here. */
1641 barf("scavenge: STATIC object\n");
1653 /* Shouldn't see stack frames here. */
1654 barf("scavenge: stack frame\n");
1656 case AP_UPD: /* same as PAPs */
1658 /* Treat a PAP just like a section of stack, not forgetting to
1659 * evacuate the function pointer too...
1662 StgPAP* pap = stgCast(StgPAP*,p);
1664 pap->fun = evacuate(pap->fun);
1665 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1666 p += pap_sizeW(pap);
1672 /* nothing to follow */
1673 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1677 /* follow everything */
1681 evac_gen = 0; /* repeatedly mutable */
1682 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1683 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1684 (StgClosure *)*p = evacuate((StgClosure *)*p);
1686 evac_gen = saved_evac_gen;
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          evacuate_mutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
  StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
               || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

  info = get_itbl((StgClosure *)p);

  switch (info -> type) {

  case FUN_1_0:                 /* hardly worth specialising these guys */
  case IND_OLDGEN_PERM:
    {
      StgPtr end;

      end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
      for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);
      }
      break;
    }

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD:                  /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = stgCast(StgPAP*,p);

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return no_luck;
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

static StgMutClosure *
scavenge_mutable_list(StgMutClosure *p, nat gen)
{
  StgInfoTable *info;
  StgMutClosure *start;
  StgMutClosure **prev;

  evac_gen = 0;

  prev = &start;
  start = p;

  failed_to_evac = rtsFalse;
  for (; p != END_MUT_LIST; p = *prev) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        break;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      prev = &p->mut_link;
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      if (p->header.info == &MUT_CONS_info) {
        evac_gen = gen;
        if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
          /* didn't manage to promote everything, so leave the
           * MUT_CONS on the list.
           */
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        evac_gen = 0;
      } else {
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        prev = &p->mut_link;
      }
      break;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        prev = &p->mut_link;
        break;
      }
    case TSO:
      /* follow ptrs and remove this from the mutable list */
      {
        StgTSO *tso = (StgTSO *)p;

        /* Don't bother scavenging if this thread is dead
         */
        if (!(tso->whatNext == ThreadComplete ||
              tso->whatNext == ThreadKilled)) {
          /* Don't need to chase the link field for any TSOs on the
           * same queue.  Just scavenge this thread's stack
           */
          scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        }

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        prev = &tso->mut_link;
        break;
      }

    case IND_OLDGEN_PERM:
      {
        /* Try to pull the indirectee into this generation, so we can
         * remove the indirection from the mutable list.
         */
        evac_gen = gen;
        ((StgIndOldGen *)p)->indirectee =
          evacuate(((StgIndOldGen *)p)->indirectee);
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
          /* the mut_link field of an IND_STATIC is overloaded as the
           * static link field too (it just so happens that we don't need
           * both at the same time), so we need to NULL it out when
           * removing this object from the mutable list because the static
           * link fields are all assumed to be NULL before doing a major
           * collection.
           */
          p->mut_link = NULL;
        }
        break;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        prev = &p->mut_link;
        break;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_object: non-mutable object?");
    }
  }

  return start;
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
          oldest_gen->mut_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgNat32 bitmap;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *stgCast(StgPtr*,p);

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG(stgCast(StgWord,q))) {

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q)) {

      if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space.
       */
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = stgCast(StgRetDyn*,p)->liveness;
      p = &payloadWord(stgCast(StgRetDyn*,p),0);
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      p++;
      goto follow_srt;

      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        StgClosureType type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *step;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }
          step = bd->step->to;
          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            evacuate_mutable((StgMutClosure *)to);
            continue;
          default:
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
      bitmap = info->layout.bitmap;
      p++;
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

      /* large bitmap (> 32 entries) */
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;
        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                 /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));
    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          evacuate_mutable((StgMutClosure *)start);
        }
        evac_gen = 0;
        continue;
      }

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
static void
zeroStaticObjectList(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zeroMutableList(StgMutClosure *first)
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
}
void revertDeadCAFs(void)
{
  StgCAF* caf = enteredCAFs;

  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF* next = caf->link;

    switch (GET_INFO(caf)->type) {
    case EVACUATED:
      {
        /* This object has been evacuated, it must be live. */
        StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
        new->link = enteredCAFs;
        enteredCAFs = new;
        break;
      }
    case CAF_ENTERED:
      {
        SET_INFO(caf,&CAF_UNENTERED_info);
        caf->value = stgCast(StgClosure*,0xdeadbeef);
        caf->link  = stgCast(StgCAF*,0xdeadbeef);
        break;
      }
    default:
      barf("revertDeadCAFs: enteredCAFs list corrupted");
    }
    caf = next;
  }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
gcCAFs(void)
{
  const StgInfoTable *info;

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
    }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */

void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadLazyBlackHole");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;   /* Temporally next */
  StgUpdateFrame *prev_frame;   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
  next_frame = NULL;
  while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;

    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze.  When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      fprintf(stderr, "squeezing frame at %p\n", frame);

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);
    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue ) {
    threadSqueezeStack(tso);    /* does black holing too */
  } else {
    threadLazyBlackHole(tso);
  }
}