/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.43 1999/02/26 12:46:46 simonm Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"  /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "DebugProf.h"
#include "StablePriv.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
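
/* An illustrative sketch, not part of the collector: this is the list
 * discipline described above, exactly as it appears where evacuate()
 * below meets a static object.  A NULL static link field means "not on
 * any list yet", which is why the end of the list is marked with a
 * non-NULL sentinel rather than NULL.
 */
#if 0
  if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
    STATIC_LINK(info,(StgClosure *)q) = static_objects;  /* cons on front */
    static_objects = (StgClosure *)q;
  }
#endif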
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure *evacuate(StgClosure *q);
static void        zeroStaticObjectList(StgClosure* first_static);
static rtsBool     traverse_weak_ptr_list(void);
static void        zeroMutableList(StgMutClosure *first);
static void        revertDeadCAFs(void);

static void        scavenge_stack(StgPtr p, StgPtr stack_end);
static void        scavenge_large(step *step);
static void        scavenge(step *step);
static void        scavenge_static(void);
static void        scavenge_mutable_list(generation *g);
static void        scavenge_mut_once_list(generation *g);

static void        gcCAFs(void);
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
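
/* An illustrative sketch, not part of the collector: the destination
 * choice above, distilled.  An object is normally copied via the "to"
 * pointer of the step it lives in; if the caller (via the evac_gen
 * variable declared above) demands an older generation, the
 * destination is adjusted.  Compare evacuate() and copy() below.
 */
#if 0
  step = bd->step->to;                       /* default destination */
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];  /* promote to evac_gen */
  }
#endif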
void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int gen, st;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }

  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }
  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zeroMutableList below).
   */
  if (major_gc) {
    zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        step->blocks = bd;
        step->n_blocks = 1;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
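
  /* A worked instance of the point above: if an object reachable from
   * gen 3's mutable list is also reachable from gen 2's, scavenging
   * gen 3 first evacuates it straight into gen 3; doing gen 2 first
   * would copy it into gen 2, only for gen 3 to want it promoted
   * again later.
   */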
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);
  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
#if 0
  {
    extern void markHugsObjects(void);

    /* ToDo: This (undefined) function should contain the scavenge
     * loop immediately below this block of code - but I'm not sure
     * enough of the details to do this myself.
     */
    scavengeEverything();
    /* revert dead CAFs and update enteredCAFs list */
    revertDeadCAFs();

    /* This will keep the CAFs and the attached BCOs alive
     * but the values will have been reverted
     */
    scavengeEverything();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
  loop2:
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
        step = &generations[gen].steps[st];
        evac_gen = gen;
        if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
          scavenge(step);
          flag = rtsTrue;
          goto loop2;
        }
        if (step->new_large_objects != NULL) {
          scavenge_large(step);
          flag = rtsTrue;
          goto loop2;
        }
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }
  /* Now see which stable names are still alive
   */
  gcStablePtrTable(major_gc);

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         *               oldgen_max_blocks * g
         * max_blocks =  -----------------------------
         *               (number of generations - 1)
         */
        if (RtsFlags.GcFlags.generations > 1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
               / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }
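
        /* For example, with 5 generations and oldest_gen->max_blocks
         * = 1000, generations 1..4 get maxima of 250, 500, 750 and
         * 1000 blocks respectively.
         */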
      } else {
        /* for older generations... */

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
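    /* A worked reading of the figures above (illustrative numbers
     * only): with L = 1000 blocks live and a nursery also of L, a
     * collection holds roughly old nursery (L) + to-space copy (L) +
     * new nursery (L) = 3L = 3000 blocks; shrinking the nursery as we
     * approach the maximum heap size moves the total towards 2L, at
     * the price of more frequent collections.
     */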
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();        /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close.
       *
       * Formula:            suggested - needed
       *          ----------------------------------------
       *              1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
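
  /* Worked example of the formula above (illustrative figures): with
   * heapSizeSuggestion = 1200 blocks, needed = 200 and g0s0_pcnt_kept
   * = 20, we get
   *
   *   blocks = ((1200 - 200) * 100) / (100 + 20) = 833
   *
   * and indeed 833 + 20% of 833 (~167) + 200 comes back to ~1200.
   */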
  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();

  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zeroStaticObjectList(scavenged_static_objects);
  }

  /* reset the nursery to empty */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
  }
  current_nursery = g0s0->blocks;

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);
  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
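
/* An illustrative sketch, not part of the collector: where this
 * function sits in the scavenging fixpoint of GarbageCollect() above.
 * Each pass may evacuate values whose keys have just become provably
 * alive, which in turn creates new scavenging work:
 *
 *   loop:
 *     ... scavenge all pending to-space ...
 *     if (traverse_weak_ptr_list()) {    (evacuated something?)
 *       goto loop;                       (then scavenge again)
 *     }
 */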
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
    }
    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;

  while (1) {
    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }

    info = get_itbl(p);
    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            /* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    default:
      /* dead */
      return NULL;
    }
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;
  bd->link = NULL;

  if (step->gen->no <= N) {
    bd->evacuated = 1;          /* it's a to-space block */
  } else {
    bd->evacuated = 0;
  }

  /* chain the new block onto this step's to-space */
  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp_bd = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
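
/* Illustrative usage, as it appears in scavenge() below: when we fail
 * to promote everything reachable from an object in an older
 * generation, the object is recorded with a MUT_CONS so that the
 * old-to-new pointers get another chance at the next GC.
 */
#if 0
  if (failed_to_evac) {
    mkMutCons((StgClosure *)q, &generations[evac_gen]);
    failed_to_evac = rtsFalse;
  }
#endif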
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N      do nothing
       else           evac to step->to

   if  M < evac_gen   evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen  do nothing
   if  M <  evac_gen  set failed_to_evac flag to indicate that we
                      didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info->type) {
  case BCO:
    return copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
  case WEAK:
  case FOREIGN:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgNat32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgNat32)(selectee_info->layout.payload.ptrs +
                            selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (IS_USER_PTR(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee");
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

    /* ToDo: optimise STATIC_LINK for known cases.
       - FUN_STATIC       : payload[0]
       - THUNK_STATIC     : payload[1]
       - IND_STATIC       : payload[1]
    */
  case THUNK_STATIC:
    if (info->srt_len == 0) {   /* small optimisation */
      return q;
    }
    /* fall through */
  case FUN_STATIC:
  case IND_STATIC:
  case CONSTR_STATIC:
    /* don't want to evacuate these, but we do want to follow pointers
     * from SRTs  - see scavenge_static.
     */

    /* put the object on the static list, if necessary.
     */
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame\n");
  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
       tagging and all.
     */
    return copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        /* fprintf(stderr,"evac failed!\n"); */
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
        return to;
      }
    }
  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su    = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp    = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
  default:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
    barf("evacuate: strange closure type");
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    /* GCC actually manages to common up these three cases! */
    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    default:
      barf("relocate_TSO");
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }
    /* treat MVars specially, because we don't want to evacuate the
     * mut_link field in the middle of the closure.
     */
    case MVAR:
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }
    case FUN_2_0:
    case THUNK_2_0:
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case FUN_0_2:
    case THUNK_0_2:
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN_1_1:
    case THUNK_1_1:
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
    case CONSTR:
    case WEAK:
    case FOREIGN:
    case CAF_UNENTERED:
    case CAF_ENTERED:
    case IND_PERM:
    case IND_OLDGEN_PERM:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;
    case CAF_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }
    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");
    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }
    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }
    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }
    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  switch (info->type) {

  case FUN:   case FUN_1_0:   /* hardly worth specialising these guys */
  case FUN_0_1:   case FUN_1_1:   case FUN_0_2:   case FUN_2_0:
  case THUNK: case THUNK_1_0: case THUNK_0_1:
  case THUNK_1_1: case THUNK_0_2: case THUNK_2_0:
  case CONSTR: case CONSTR_1_0: case CONSTR_0_1:
  case CONSTR_1_1: case CONSTR_0_2: case CONSTR_2_0:
  case WEAK:  case FOREIGN:  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

#if 0
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * GC.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object?");
    }
  }

  gen->mut_once_list = new_list;
}
static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen->no;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
          gen->mut_list = p;
        }
        continue;
      }

    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      /* follow ptrs and remove this from the mutable list */
      {
        StgTSO *tso = (StgTSO *)p;

        /* Don't bother scavenging if this thread is dead
         */
        if (!(tso->whatNext == ThreadComplete ||
              tso->whatNext == ThreadKilled)) {
          /* Don't need to chase the link field for any TSOs on the
           * same queue. Just scavenge this thread's stack
           */
          scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        }

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_list: strange object?");
    }
  }
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
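
/* A note on the bitmap convention used below: a 0 bit in an
 * activation record's liveness bitmap marks a stack word holding a
 * pointer (which must be evacuated); a 1 bit marks a non-pointer.
 * For example, a small bitmap of binary 0110 means words 0 and 3 of
 * the frame's payload are pointers, while words 1 and 2 are not.
 */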
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgNat32 bitmap;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *stgCast(StgPtr*,p);

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG(stgCast(StgWord,q))) {
      p += ARG_SIZE(q);
      p++;
      continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q)) {

      if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space.
       */
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl(stgCast(StgClosure*,p));
    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = stgCast(StgRetDyn*,p)->liveness;
      p = &payloadWord(stgCast(StgRetDyn*,p),0);
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      p++;
      continue;
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        StgClosureType type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *step;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          step = bd->step->to;
          if (!(step->gen->no == 0 &&
                step->no != 0 &&
                step->no == step->gen->n_steps-1)) {
            if (step->gen->no < evac_gen) {
              step = &generations[evac_gen].steps[0];
            }
          }

          switch (type) {
          case CAF_BLACKHOLE:
          case BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                 /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        failed_to_evac = rtsFalse;
        evac_gen = 0;
        continue;
      }

    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
static void
zeroStaticObjectList(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zeroMutableList(StgMutClosure *first)
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link  = stgCast(StgCAF*,0xdeadbeef);
  }
}
static void revertDeadCAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF* next = caf->link;

    switch (GET_INFO(caf)->type) {
    case EVACUATED:
      {
        /* This object has been evacuated, it must be live. */
        StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
        new->link = enteredCAFs;
        enteredCAFs = new;
        break;
      }
    case CAF_ENTERED:
      {
        SET_INFO(caf,&CAF_UNENTERED_info);
        caf->value = stgCast(StgClosure*,0xdeadbeef);
        caf->link  = stgCast(StgCAF*,0xdeadbeef);
        break;
      }
    default:
      barf("revertDeadCAFs: enteredCAFs list corrupted");
    }
    caf = next;
  }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadPaused");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;   /* Temporally next */
  StgUpdateFrame *prev_frame;   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
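
  /* An illustrative picture (not code): if the update frames were
   * pushed in the order A (oldest, nearest the stack bottom), then B,
   * then C, the links run C -> B -> A on entry to this walk; after it
   * they run A -> B -> C, so the squeezing loop below can start at
   * the bottom and work back up.
   */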
  next_frame = NULL;
  while ((P_)frame < bottom - 1) {  /* bottom - 1 is the STOP_FRAME */
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }

  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if 0 /* debugging only */
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif
      TICK_UPD_SQUEEZED();
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;       /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;    /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL) {
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      } else {
        next_frame_bottom = tso->sp - 1;
      }

#if 0 /* debugging only */
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
#endif

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}