/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.18 1999/01/20 16:24:02 simonm Exp $
 *
 * Two-space garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "Sanity.h"
#include "GC.h"
#include "BlockAlloc.h"
#include "Weak.h"
#include "DebugProf.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
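
/* A minimal sketch of the list discipline described above.  The helper
 * is hypothetical (the real pushes happen inline in evacuate() and
 * scavenge_static()), but it shows the idiom: an object is on the list
 * iff its static link field is non-zero, so the membership test and
 * the push combine into one check, and the end of the list must be a
 * non-NULL sentinel (END_OF_STATIC_LIST, i.e. '1') rather than NULL.
 */
#if 0 /* illustrative only */
static void
pushStaticObject(StgClosure *p, const StgInfoTable *info)
{
  if (STATIC_LINK(info,p) == NULL) {    /* not on the list yet? */
    STATIC_LINK(info,p) = static_objects;
    static_objects = p;                 /* cons onto the front */
  }
}
#endif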
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* WEAK POINTERS
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure *evacuate(StgClosure *q);
static void zeroStaticObjectList(StgClosure* first_static);
static rtsBool traverse_weak_ptr_list(void);
static void zeroMutableList(StgMutClosure *first);
static void revertDeadCAFs(void);
static StgTSO *relocate_TSO(StgTSO *src, StgTSO *dest);
static void scavenge_stack(StgPtr p, StgPtr stack_end);
static void scavenge_large(step *step);
static void scavenge(step *step);
static void scavenge_static(void);
static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
static void gcCAFs(void);
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
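
/* A sketch of the destination rule above for an object found in
 * generation g, step s.  The helper is hypothetical - the real code
 * makes this decision inside evacuate()/copy(), using the "to"
 * pointer cached in each step - but it restates the bullets in C.
 */
#if 0 /* illustrative only */
static step *
evacDest(nat g, nat s)
{
  if (s+1 < generations[g].n_steps) {           /* next higher step? */
    return &generations[g].steps[s+1];
  } else if (g+1 < RtsFlags.GcFlags.generations) {
    return &generations[g+1].steps[0];          /* next generation */
  } else {
    return &generations[g].steps[s];            /* oldest gen: its own to-space */
  }
}
#endif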
void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0;
  nat g, s;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }

  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }

  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  /* IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zeroMutableList below).
   */
  zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;      /* ???? */
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        step->blocks = bd;
        step->n_blocks = 1;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    StgMutClosure *tmp, **pp;
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
      pp = &generations[g].mut_list;
      while (*pp != END_MUT_LIST) {
        pp = &(*pp)->mut_link;
      }
      /* append the remains of the saved list to the new mutable list */
      *pp = tmp;
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
#if 0
    /* ToDo: This (undefined) function should contain the scavenge
     * loop immediately below this block of code - but I'm not sure
     * enough of the details to do this myself.
     */
    scavengeEverything();
    /* revert dead CAFs and update enteredCAFs list */
    revertDeadCAFs();
#endif
    markHugsObjects();
#if 0
    /* This will keep the CAFs and the attached BCOs alive
     * but the values will have been reverted
     */
    scavengeEverything();
#endif
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    {
      int gen;
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
        for (s = 0; s < generations[gen].n_steps; s++) {
          step = &generations[gen].steps[s];
          evac_gen = gen;
          if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
            scavenge(step);
            flag = rtsTrue;
          }
          if (step->new_large_objects != NULL) {
            scavenge_large(step);
            flag = rtsTrue;
          }
        }
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
  /* For a two-space collector, we need to resize the nursery. */

  /* set up a new nursery.  Allocate a nursery size based on a
   * function of the amount of live data (currently a factor of 2,
   * should be configurable (ToDo)).  Use the blocks from the old
   * nursery if possible, freeing up any left over blocks.
   *
   * If we get near the maximum heap size, then adjust our nursery
   * size accordingly.  If the nursery is the same size as the live
   * data (L), then we need 3L bytes.  We can reduce the size of the
   * nursery to bring the required memory down near 2L bytes.
   *
   * A normal 2-space collector would need 4L bytes to give the same
   * performance we get from 3L bytes, reducing to the same
   * performance at 2L bytes.
   */
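
  /* To spell out the arithmetic above: with L bytes of live data, the
   * collection itself needs the existing heap (~L of it live) plus a
   * to-space copy (another L); a nursery of size L on top of that
   * gives the 3L peak.  Shrinking the nursery towards zero moves the
   * peak towards 2L, at the cost of more frequent collections.
   */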
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;      /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }

    if (nursery_blocks < blocks) {
      IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
                           blocks));
      g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
    } else {
      bdescr *next;
      IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
                           blocks));
      for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
        next = bd->link;
        freeGroup(bd);
        bd = next;
      }
      g0s0->blocks = bd;
    }

    g0s0->n_blocks = nursery_blocks = blocks;
  }
  /* run through all the generations/steps and tidy up
   */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         *   max_blocks =  alloc_area_size +
         *                    (oldgen_max_blocks - alloc_area_size) * G
         *                    -----------------------------------------
         *                           (number of generations - 1)
         *
         * where G is the generation number.
         */
        generations[g].max_blocks =
          RtsFlags.GcFlags.minAllocAreaSize +
          (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
           / (RtsFlags.GcFlags.generations-1));
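
        /* For example (figures illustrative, not defaults): with
         * alloc_area_size = 256 blocks, oldgen_max_blocks = 10000 and
         * 5 generations, generation 2 gets 256 + (10000-256)*2/4 =
         * 5128 blocks - the limits grow linearly from the allocation
         * area size up to the old generation's limit.
         */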
      }

      /* for older generations... */
      else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }
    live = g0s0->to_blocks * BLOCK_SIZE_W +
      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);

  /* Generational collector:
   * estimate the amount of live data.
   */
  } else {
    live = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        /* approximate amount of live data (doesn't take into account slop
         * at end of each block).  ToDo: do this more accurately.
         */
        if (g == 0 && s == 0) { continue; }
        step = &generations[g].steps[s];
        live += step->n_blocks * BLOCK_SIZE_W +
          ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
      }
    }
  }
  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();

  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zeroStaticObjectList(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
  }
  current_nursery = g0s0->blocks;

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* start any pending finalisers */
  scheduleFinalisers(old_weak_ptr_list);
  /* check sanity after GC */
  if (RtsFlags.GcFlags.generations == 1) {
    IF_DEBUG(sanity, checkHeap(g0s0->to_space, NULL));
    IF_DEBUG(sanity, checkChain(g0s0->large_objects));
  } else {
    for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        if (g == 0 && s == 0) { continue; }
        IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
      }
    }
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
        IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
                                   generations[g].steps[s].blocks->start));
        IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
      }
    }
  }
  IF_DEBUG(sanity, checkFreeListSanity());

  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalise weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *target;
  const StgInfoTable *info;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalisers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {
    target = w->key;
  loop:
    /* ignore weak pointers in older generations */
    if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      continue;
    }

    info = get_itbl(target);
    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            /* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      target = ((StgInd *)target)->indirectee;
      goto loop;

    case EVACUATED:
      /* If key is alive, evacuate value and finaliser and
       * place weak ptr on new weak ptr list.
       */
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
      w->key = ((StgEvacuated *)target)->evacuee;
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);

      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;

      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      break;

    default:                    /* key is dead */
      last_w = &(w->link);
      next_w = w->link;
      break;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalisers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);
    }
    weak_done = rtsTrue;
  }

  return flag;
}
StgClosure *
MarkRoot(StgClosure *root)
{
  root = evacuate(root);
  return root;
}

static inline void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, bdescr *bd)
{
  step *step;
  P_ to, from, dest;

  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size;
  for (to = dest, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
{
  step *step;
  P_ to, from, dest;

  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  for (to = dest, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }
  return (StgClosure *)dest;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  StgEvacuated *q = (StgEvacuated *)p;

  SET_INFO(q, &EVACUATED_info);
  q->evacuee = dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a mutable object

   If we evacuate a mutable object to an old generation, cons the
   object onto the older generation's mutable list.
   -------------------------------------------------------------------------- */

static void
evacuate_mutable(StgMutClosure *c)
{
  bdescr *bd;

  bd = Bdescr((P_)c);
  if (bd->gen->no > 0) {
    c->mut_link = bd->gen->mut_list;
    bd->gen->mut_list = c;
  }
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    evacuate_mutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q, &MUT_CONS_info, CCS_GC);
  q->var = ptr;
  evacuate_mutable((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

     if  M >= evac_gen
         if  M > N     then it stays put (we're not collecting gen M)
         else          evac to step->to
     if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

     if  M >= evac_gen  do nothing
     if  M <  evac_gen  set failed_to_evac flag to indicate that we
                        didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
      }
      return q;
    }
  }

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info->type) {

  case BCO:
    to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
    upd_evacuee(q,to);
    return to;

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),bd);
    upd_evacuee(q,to);
    evacuate_mutable((StgMutClosure *)to);
    return to;

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
  case WEAK:
  case FOREIGN:
    to = copy(q,sizeW_fromITBL(info),bd);
    upd_evacuee(q,to);
    return to;

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
    upd_evacuee(q,to);
    return to;

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),bd);
    upd_evacuee(q,to);
    evacuate_mutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

      case CONSTR:
      case CONSTR_STATIC:
        {
          StgNat32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgNat32)(selectee_info->layout.payload.ptrs +
                            selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (IS_USER_PTR(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee");
      }
    }
    to = copy(q,THUNK_SELECTOR_sizeW(),bd);
    upd_evacuee(q,to);
    return to;
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

    /* ToDo: optimise STATIC_LINK for known cases.
       - FUN_STATIC   : payload[0]
       - THUNK_STATIC : payload[1]
       - IND_STATIC   : payload[1]
    */
  case THUNK_STATIC:
  case FUN_STATIC:
    if (info->srt_len == 0) {   /* small optimisation */
      return q;
    }
    /* fall through */
  case IND_STATIC:
  case CONSTR_STATIC:
    /* don't want to evacuate these, but we do want to follow pointers
     * from SRTs  - see scavenge_static.
     */

    /* put the object on the static list, if necessary.
     */
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame\n");
  case PAP:
  case AP_UPD:
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
    to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
    upd_evacuee(q,to);
    return to;

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        /* fprintf(stderr,"evac failed!\n"); */
        failed_to_evac = rtsTrue;
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,bd);
        upd_evacuee(q,to);
        return to;
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,bd);
        upd_evacuee(q,to);
        if (info->type == MUT_ARR_PTRS) {
          evacuate_mutable((StgMutClosure *)to);
        }
        return to;
      }
    }

  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
        int diff;

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);
        upd_evacuee(q,(StgClosure *)new_tso);

        evacuate_mutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }

  default:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
    barf("evacuate: strange closure type");
  }

  barf("evacuate");             /* not reached */
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */

static StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO");
    }
    break;
  }

  return dest;
}
static __inline__ void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    case CAF_UNENTERED:
    case CAF_ENTERED:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          evacuate_mutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD:                /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          evacuate_mutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
               || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

  info = get_itbl((StgClosure *)p);
  switch (info->type) {

  case FUN:
  case THUNK:
  case CONSTR:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
    {
      StgPtr end;

      end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
      for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD:                  /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = stgCast(StgPAP*,p);

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

static StgMutClosure *
scavenge_mutable_list(StgMutClosure *p, nat gen)
{
  const StgInfoTable *info;
  StgMutClosure *start;
  StgMutClosure **prev;

  evac_gen = 0;

  prev = &start;
  start = p;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = *prev) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      prev = &p->mut_link;
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      if (p->header.info == &MUT_CONS_info) {
        evac_gen = gen;
        if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
          /* didn't manage to promote everything, so leave the
           * MUT_CONS on the list.
           */
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        evac_gen = 0;
      } else {
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        prev = &p->mut_link;
      }
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        prev = &p->mut_link;
        continue;
      }

    case TSO:
      /* follow ptrs and remove this from the mutable list */
      {
        StgTSO *tso = (StgTSO *)p;

        /* Don't bother scavenging if this thread is dead
         */
        if (!(tso->whatNext == ThreadComplete ||
              tso->whatNext == ThreadKilled)) {
          /* Don't need to chase the link field for any TSOs on the
           * same queue.  Just scavenge this thread's stack
           */
          scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        }

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        prev = &tso->mut_link;
        continue;
      }

    case IND_STATIC:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        prev = &p->mut_link;
      } else {
        *prev = p->mut_link;
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        prev = &p->mut_link;
        continue;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_object: non-mutable object?");
    }
  }

  return start;
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
          oldest_gen->mut_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
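
/* A sketch of the stack chunks the walk below expects (illustrative;
 * the details follow from the loop itself):
 *
 *   ... | ARG_TAG(n) | n pending-argument words | info ptr | frame
 *   payload described by the info table's bitmap (one bit per word,
 *   0 => pointer to evacuate, 1 => non-pointer) | next chunk ...
 */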
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgNat32 bitmap;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *stgCast(StgPtr*,p);

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG(stgCast(StgWord,q))) {
      p += ARG_SIZE(q);
      p++;
      continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q)) {
      if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space.
       */
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = stgCast(StgRetDyn*,p)->liveness;
      p = &payloadWord(stgCast(StgRetDyn*,p),0);
      goto small_bitmap;

    /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      p++;
      continue;

    /* Specialised code for update frames, since they're so common.
     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
     */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        StgClosureType type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }
          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), bd);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            evacuate_mutable((StgMutClosure *)to);
            continue;
          default:
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }

    /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      bitmap = info->layout.bitmap;
      p++;
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;

    /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          /* the rest of this bitmap's words are all pointers */
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                 /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          evacuate_mutable((StgMutClosure *)start);
        }
        failed_to_evac = rtsFalse;
        evac_gen = 0;
        continue;
      }

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
static void
zeroStaticObjectList(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}

/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zeroMutableList(StgMutClosure *first)
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
}

static void revertDeadCAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF* next = caf->link;

    switch (GET_INFO(caf)->type) {
    case EVACUATED:
      {
        /* This object has been evacuated, it must be live. */
        StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
        new->link = enteredCAFs;
        enteredCAFs = new;
        break;
      }

    case CAF_ENTERED:
      {
        /* dead: revert it, poisoning the old fields */
        SET_INFO(caf,&CAF_UNENTERED_info);
        caf->value = stgCast(StgClosure*,0xdeadbeef);
        caf->link  = stgCast(StgCAF*,0xdeadbeef);
        break;
      }

    default:
      barf("revertDeadCAFs: enteredCAFs list corrupted");
    }
    caf = next;
  }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */

void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;   /* Temporally next */
  StgUpdateFrame *prev_frame;   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
  next_frame = NULL;
  while ((P_)frame < bottom - 1) {  /* bottom - 1 is the STOP_FRAME */
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if 0
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

#if 0
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
#endif

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}