1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.23 1999/02/02 14:21:29 simonm Exp $
 * Generational garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
23 #include "StablePriv.h"
27 /* STATIC OBJECT LIST.
30 * We maintain a linked list of static objects that are still live.
31 * The requirements for this list are:
33 * - we need to scan the list while adding to it, in order to
34 * scavenge all the static objects (in the same way that
35 * breadth-first scavenging works for dynamic objects).
37 * - we need to be able to tell whether an object is already on
38 * the list, to break loops.
40 * Each static object has a "static link field", which we use for
41 * linking objects on to the list. We use a stack-type list, consing
42 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
46 * A separate list is kept for objects that have been scavenged
47 * already - this is so that we can zero all the marks afterwards.
49 * An object is on the list if its static link field is non-zero; this
50 * means that we have to mark the end of the list with '1', not NULL.
52 * Extra notes for generational GC:
54 * Each generation has a static object list associated with it. When
55 * collecting generations up to N, we treat the static object lists
56 * from generations > N as roots.
58 * We build up a static object list while collecting generations 0..N,
59 * which is then appended to the static object list of generation N+1.
61 StgClosure* static_objects; /* live static objects */
62 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
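
/* An illustrative sketch (not part of the collector): consing an
 * object onto the static list under the convention described above.
 * It assumes END_OF_STATIC_LIST is the non-NULL end-of-list marker
 * mentioned above (i.e. something like (StgClosure *)1), and mirrors
 * the code in evacuate() below.
 */
#if 0
static void
add_static_object(StgClosure *p, const StgInfoTable *info)
{
    /* a NULL static link field means "not on the list yet"; checking
     * it first is what breaks cycles among static objects.
     */
    if (STATIC_LINK(info,p) == NULL) {
        STATIC_LINK(info,p) = static_objects;  /* cons on the front */
        static_objects = p;
    }
}
#endif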
64 /* N is the oldest generation being collected, where the generations
65 * are numbered starting at 0. A major GC (indicated by the major_gc
66 * flag) is when we're collecting all generations. We only attempt to
67 * deal with static objects and GC CAFs when doing a major GC.
70 static rtsBool major_gc;
72 /* Youngest generation that objects should be evacuated to in
73 * evacuate(). (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
80 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
81 static rtsBool weak_done; /* all done for this pass */
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
86 static rtsBool failed_to_evac;
/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;
92 /* -----------------------------------------------------------------------------
93 Static function declarations
94 -------------------------------------------------------------------------- */
96 static StgClosure *evacuate(StgClosure *q);
97 static void zeroStaticObjectList(StgClosure* first_static);
98 static rtsBool traverse_weak_ptr_list(void);
99 static void zeroMutableList(StgMutClosure *first);
100 static void revertDeadCAFs(void);
102 static void scavenge_stack(StgPtr p, StgPtr stack_end);
103 static void scavenge_large(step *step);
104 static void scavenge(step *step);
105 static void scavenge_static(void);
106 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
107 static StgMutClosure *scavenge_mut_once_list(StgMutClosure *p, nat gen);
110 static void gcCAFs(void);
113 /* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):
118 - follow all pointers in the root set. the root set includes all
119 mutable objects in all steps in all generations.
121 - for each pointer, evacuate the object it points to into either
122 + to-space in the next higher step in that generation, if one exists,
123 + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
126 + if the object's generation < N, then evacuate it to to-space
127 in the next generation.
129 - repeatedly scavenge to-space from each step in each generation
130 being collected until no more objects can be evacuated.
132 - free from-space in each step, and set from-space = to-space.
134 -------------------------------------------------------------------------- */
136 void GarbageCollect(void (*get_roots)(void))
140 lnat live, allocated, collected = 0;
144 CostCentreStack *prev_CCS;
147 /* tell the stats department that we've started a GC */
150 /* attribute any costs to CCS_GC */
156 /* We might have been called from Haskell land by _ccall_GC, in
157 * which case we need to call threadPaused() because the scheduler
158 * won't have done it.
160 if (CurrentTSO) { threadPaused(CurrentTSO); }
162 /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
164 * This leaves a little slop at the end of each block, and doesn't
165 * take into account large objects (ToDo).
167 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
168 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
169 allocated -= BLOCK_SIZE_W;
172 /* Figure out which generation to collect
175 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
176 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
180 major_gc = (N == RtsFlags.GcFlags.generations-1);
182 /* check stack sanity *before* GC (ToDo: check all threads) */
183 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
184 IF_DEBUG(sanity, checkFreeListSanity());
186 /* Initialise the static object lists
188 static_objects = END_OF_STATIC_LIST;
189 scavenged_static_objects = END_OF_STATIC_LIST;
191 /* zero the mutable list for the oldest generation (see comment by
192 * zeroMutableList below).
195 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
198 /* Save the old to-space if we're doing a two-space collection
200 if (RtsFlags.GcFlags.generations == 1) {
201 old_to_space = g0s0->to_space;
202 g0s0->to_space = NULL;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
208 for (g = 0; g <= N; g++) {
209 generations[g].mut_once_list = END_MUT_LIST;
210 generations[g].mut_list = END_MUT_LIST;
212 for (s = 0; s < generations[g].n_steps; s++) {
214 /* generation 0, step 0 doesn't need to-space */
215 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
223 step = &generations[g].steps[s];
224 ASSERT(step->gen->no == g);
225 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
226 bd->gen = &generations[g];
229 bd->evacuated = 1; /* it's a to-space block */
230 step->hp = bd->start;
231 step->hpLim = step->hp + BLOCK_SIZE_W;
234 step->to_blocks = 1; /* ???? */
235 step->scan = bd->start;
237 step->new_large_objects = NULL;
238 step->scavenged_large_objects = NULL;
239 /* mark the large objects as not evacuated yet */
240 for (bd = step->large_objects; bd; bd = bd->link) {
246 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(); see below).
   */
249 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
250 for (s = 0; s < generations[g].n_steps; s++) {
251 step = &generations[g].steps[s];
252 if (step->hp_bd == NULL) {
254 bd->gen = &generations[g];
257 bd->evacuated = 0; /* *not* a to-space block */
258 step->hp = bd->start;
259 step->hpLim = step->hp + BLOCK_SIZE_W;
264 /* Set the scan pointer for older generations: remember we
265 * still have to scavenge objects that have been promoted. */
266 step->scan = step->hp;
267 step->scan_bd = step->hp_bd;
268 step->to_space = NULL;
270 step->new_large_objects = NULL;
271 step->scavenged_large_objects = NULL;
275 /* -----------------------------------------------------------------------
276 * follow all the roots that we know about:
277 * - mutable lists from each generation > N
278 * we want to *scavenge* these roots, not evacuate them: they're not
279 * going to move in this GC.
280 * Also: do them in reverse generation order. This is because we
281 * often want to promote objects that are pointed to by older
282 * generations early, so we don't have to repeatedly copy them.
283 * Doing the generations in reverse order ensures that we don't end
284 * up in the situation where we want to evac an object to gen 3 and
285 * it has already been evaced to gen 2.
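   *
   * For example (an illustrative scenario, not from the original
   * comment): with four generations and N == 1, we scavenge gen 3's
   * mutable list before gen 2's, so an object reachable from both is
   * evacuated straight to gen 3, rather than first to gen 2 and then
   * copied again on a later pass.
   */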
288 StgMutClosure *tmp, **pp;
289 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
290 generations[g].saved_mut_list = generations[g].mut_list;
291 generations[g].mut_list = END_MUT_LIST;
294 /* Do the mut-once lists first */
295 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
296 generations[g].mut_once_list =
297 scavenge_mut_once_list(generations[g].mut_once_list, g);
300 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
301 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
302 pp = &generations[g].mut_list;
303 while (*pp != END_MUT_LIST) {
304 pp = &(*pp)->mut_link;
310 /* follow all the roots that the application knows about.
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell!
   */
318 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
325 old_weak_ptr_list = weak_ptr_list;
326 weak_ptr_list = NULL;
327 weak_done = rtsFalse;
329 /* Mark the stable pointer table.
331 markStablePtrTable(major_gc);
335 /* ToDo: To fix the caf leak, we need to make the commented out
336 * parts of this code do something sensible - as described in
339 extern void markHugsObjects(void);
341 /* ToDo: This (undefined) function should contain the scavenge
342 * loop immediately below this block of code - but I'm not sure
343 * enough of the details to do this myself.
345 scavengeEverything();
346 /* revert dead CAFs and update enteredCAFs list */
    /* This will keep the CAFs and the attached BCOs alive,
     * but the values will have been reverted.
     */
354 scavengeEverything();
359 /* -------------------------------------------------------------------------
360 * Repeatedly scavenge all the areas we know about until there's no
361 * more scavenging to be done.
368 /* scavenge static objects */
369 if (major_gc && static_objects != END_OF_STATIC_LIST) {
373 /* When scavenging the older generations: Objects may have been
374 * evacuated from generations <= N into older generations, and we
375 * need to scavenge these objects. We're going to try to ensure that
376 * any evacuations that occur move the objects into at least the
377 * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */
382 /* scavenge each step in generations 0..maxgen */
385 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
386 for (s = 0; s < generations[gen].n_steps; s++) {
387 step = &generations[gen].steps[s];
389 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
393 if (step->new_large_objects != NULL) {
394 scavenge_large(step);
400 if (flag) { goto loop; }
402 /* must be last... */
403 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
408 /* Now see which stable names are still alive
410 gcStablePtrTable(major_gc);
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
420 if (RtsFlags.GcFlags.generations > 1) {
422 oldest_gen->max_blocks =
423 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
424 RtsFlags.GcFlags.minOldGenSize);
425 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
426 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
427 if (((int)oldest_gen->max_blocks -
428 (int)oldest_gen->steps[0].to_blocks) <
429 (RtsFlags.GcFlags.pcFreeHeap *
430 RtsFlags.GcFlags.maxHeapSize / 200)) {
437 /* run through all the generations/steps and tidy up
439 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
442 generations[g].collections++; /* for stats */
445 for (s = 0; s < generations[g].n_steps; s++) {
447 step = &generations[g].steps[s];
449 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
450 /* Tidy the end of the to-space chains */
451 step->hp_bd->free = step->hp;
452 step->hp_bd->link = NULL;
455 /* for generations we collected... */
458 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
460 /* free old memory and shift to-space into from-space for all
461 * the collected steps (except the allocation area). These
       * freed blocks will probably be quickly recycled.
       */
464 if (!(g == 0 && s == 0)) {
465 freeChain(step->blocks);
466 step->blocks = step->to_space;
467 step->n_blocks = step->to_blocks;
468 step->to_space = NULL;
470 for (bd = step->blocks; bd != NULL; bd = bd->link) {
471 bd->evacuated = 0; /* now from-space */
475 /* LARGE OBJECTS. The current live large objects are chained on
       * scavenged_large_objects, having been moved during garbage
       * collection from large_objects.  Any objects left on the
       * large_objects list are therefore dead, so we free them here.
       */
480 for (bd = step->large_objects; bd != NULL; bd = next) {
485 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
488 step->large_objects = step->scavenged_large_objects;
      /* Set the maximum blocks for this generation, interpolating
       * between the maximum size of the oldest and youngest
       * generations.
       *
       *               oldest_gen->max_blocks * g
       * max_blocks = ----------------------------
       *                (number of generations) - 1
       */
499 generations[g].max_blocks = (oldest_gen->max_blocks * g)
500 / (RtsFlags.GcFlags.generations-1);
503 /* for older generations... */
506 /* For older generations, we need to append the
507 * scavenged_large_object list (i.e. large objects that have been
508 * promoted during this GC) to the large_object list for that step.
510 for (bd = step->scavenged_large_objects; bd; bd = next) {
513 dbl_link_onto(bd, &step->large_objects);
516 /* add the new blocks we promoted during this GC */
517 step->n_blocks += step->to_blocks;
522 /* Guess the amount of live data for stats. */
525 /* Two-space collector:
526 * Free the old to-space, and estimate the amount of live data.
528 if (RtsFlags.GcFlags.generations == 1) {
531 if (old_to_space != NULL) {
532 freeChain(old_to_space);
534 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
535 bd->evacuated = 0; /* now from-space */
538 /* For a two-space collector, we need to resize the nursery. */
540 /* set up a new nursery. Allocate a nursery size based on a
541 * function of the amount of live data (currently a factor of 2,
542 * should be configurable (ToDo)). Use the blocks from the old
543 * nursery if possible, freeing up any left over blocks.
545 * If we get near the maximum heap size, then adjust our nursery
546 * size accordingly. If the nursery is the same size as the live
547 * data (L), then we need 3L bytes. We can reduce the size of the
548 * nursery to bring the required memory down near 2L bytes.
550 * A normal 2-space collector would need 4L bytes to give the same
551 * performance we get from 3L bytes, reducing to the same
552 * performance at 2L bytes.
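     *
     * A worked example (illustrative numbers, not from the original
     * comment): if the live data L is 1000 blocks and the nursery is
     * also 1000 blocks, a collection needs roughly another 1000 blocks
     * of to-space, i.e. 3L in total; shrinking the nursery towards
     * zero brings the total requirement down towards 2L.
     */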
554 blocks = g0s0->n_blocks;
556 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
557 RtsFlags.GcFlags.maxHeapSize ) {
558 int adjusted_blocks; /* signed on purpose */
561 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
562 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
563 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
564 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
567 blocks = adjusted_blocks;
570 blocks *= RtsFlags.GcFlags.oldGenFactor;
571 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
572 blocks = RtsFlags.GcFlags.minAllocAreaSize;
575 resizeNursery(blocks);
578 /* Generational collector:
579 * If the user has given us a suggested heap size, adjust our
580 * allocation area to make best use of the memory available.
583 if (RtsFlags.GcFlags.heapSizeSuggestion) {
585 nat needed = calcNeeded(); /* approx blocks needed at next GC */
587 /* Guess how much will be live in generation 0 step 0 next time.
588 * A good approximation is the amount of data that was live this
589 * time: this assumes (1) that the size of G0S0 will be roughly
     * the same as last time, and (2) that the promotion rate will be
     * constant.
     *
593 * If we don't know how much was live in G0S0 (because there's no
594 * step 1), then assume 30% (which is usually an overestimate).
596 if (g0->n_steps == 1) {
597 needed += (g0s0->n_blocks * 30) / 100;
599 needed += g0->steps[1].n_blocks;
602 /* Now we have a rough guess at the number of blocks needed for
603 * the next GC, subtract this from the user's suggested heap size
604 * and use the rest for the allocation area.
606 blocks = (int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed;
608 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
609 blocks = RtsFlags.GcFlags.minAllocAreaSize;
612 resizeNursery((nat)blocks);
616 /* revert dead CAFs and update enteredCAFs list */
619 /* mark the garbage collected CAFs as dead */
621 if (major_gc) { gcCAFs(); }
624 /* zero the scavenged static object list */
626 zeroStaticObjectList(scavenged_static_objects);
631 for (bd = g0s0->blocks; bd; bd = bd->link) {
632 bd->free = bd->start;
633 ASSERT(bd->gen == g0);
634 ASSERT(bd->step == g0s0);
636 current_nursery = g0s0->blocks;
638 /* Free the small objects allocated via allocate(), since this will
639 * all have been copied into G0S1 now.
641 if (small_alloc_list != NULL) {
642 freeChain(small_alloc_list);
644 small_alloc_list = NULL;
646 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
648 /* start any pending finalisers */
649 scheduleFinalisers(old_weak_ptr_list);
651 /* check sanity after GC */
652 IF_DEBUG(sanity, checkSanity(N));
654 /* extra GC trace info */
655 IF_DEBUG(gc, stat_describe_gens());
658 /* symbol-table based profiling */
659 /* heapCensus(to_space); */ /* ToDo */
662 /* restore enclosing cost centre */
667 /* check for memory leaks if sanity checking is on */
668 IF_DEBUG(sanity, memInventory());
670 /* ok, GC over: tell the stats department what happened. */
671 stat_endGC(allocated, collected, live, N);
674 /* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
678 collection. It returns a flag indicating whether it did any work
679 (i.e. called evacuate on any live pointers).
681 Invariant: traverse_weak_ptr_list is called when the heap is in an
682 idempotent state. That means that there are no pending
683 evacuate/scavenge operations. This invariant helps the weak
684 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
688 For generational GC: we just don't try to finalise weak pointers in
689 older generations than the one we're collecting. This could
690 probably be optimised by keeping per-generation lists of weak
691 pointers, but for a few weak pointers this scheme will work.
692 -------------------------------------------------------------------------- */
695 traverse_weak_ptr_list(void)
697 StgWeak *w, **last_w, *next_w;
699 rtsBool flag = rtsFalse;
701 if (weak_done) { return rtsFalse; }
703 /* doesn't matter where we evacuate values/finalisers to, since
704 * these pointers are treated as roots (iff the keys are alive).
708 last_w = &old_weak_ptr_list;
709 for (w = old_weak_ptr_list; w; w = next_w) {
711 if ((new = isAlive(w->key))) {
713 /* evacuate the value and finaliser */
714 w->value = evacuate(w->value);
715 w->finaliser = evacuate(w->finaliser);
716 /* remove this weak ptr from the old_weak_ptr list */
718 /* and put it on the new weak ptr list */
720 w->link = weak_ptr_list;
723 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
733 /* If we didn't make any changes, then we can go round and kill all
734 * the dead weak pointers. The old_weak_ptr list is used as a list
735 * of pending finalisers later on.
737 if (flag == rtsFalse) {
738 for (w = old_weak_ptr_list; w; w = w->link) {
739 w->value = evacuate(w->value);
740 w->finaliser = evacuate(w->finaliser);
748 /* -----------------------------------------------------------------------------
749 isAlive determines whether the given closure is still alive (after
750 a garbage collection) or not. It returns the new address of the
751 closure if it is alive, or NULL otherwise.
752 -------------------------------------------------------------------------- */
755 isAlive(StgClosure *p)
763 /* ToDo: for static closures, check the static link field.
764 * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_NOCAF_STATICs.
   */
768 /* ignore closures in generations that we're not collecting. */
769 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
773 switch (info->type) {
778 case IND_OLDGEN: /* rely on compatible layout with StgInd */
779 case IND_OLDGEN_PERM:
780 /* follow indirections */
781 p = ((StgInd *)p)->indirectee;
786 return ((StgEvacuated *)p)->evacuee;
796 MarkRoot(StgClosure *root)
798 return evacuate(root);
801 static void addBlock(step *step)
803 bdescr *bd = allocBlock();
807 if (step->gen->no <= N) {
813 step->hp_bd->free = step->hp;
814 step->hp_bd->link = bd;
815 step->hp = bd->start;
816 step->hpLim = step->hp + BLOCK_SIZE_W;
821 static __inline__ StgClosure *
822 copy(StgClosure *src, nat size, step *step)
826 TICK_GC_WORDS_COPIED(size);
827 /* Find out where we're going, using the handy "to" pointer in
828 * the step of the source object. If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment in
   * evacuate()).
   */
832 if (step->gen->no < evac_gen) {
833 step = &generations[evac_gen].steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
839 if (step->hp + size >= step->hpLim) {
843 for(to = step->hp, from = (P_)src; size>0; --size) {
849 return (StgClosure *)dest;
852 /* Special version of copy() for when we only want to copy the info
853 * pointer of an object, but reserve some padding after it. This is
854 * used to optimise evacuation of BLACKHOLEs.
857 static __inline__ StgClosure *
858 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
862 TICK_GC_WORDS_COPIED(size_to_copy);
863 if (step->gen->no < evac_gen) {
864 step = &generations[evac_gen].steps[0];
867 if (step->hp + size_to_reserve >= step->hpLim) {
871 for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
876 step->hp += size_to_reserve;
877 return (StgClosure *)dest;
880 static __inline__ void
881 upd_evacuee(StgClosure *p, StgClosure *dest)
883 StgEvacuated *q = (StgEvacuated *)p;
885 SET_INFO(q,&EVACUATED_info);
889 /* -----------------------------------------------------------------------------
890 Evacuate a large object
892 This just consists of removing the object from the (doubly-linked)
   large_objects list of its step, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.
896 Convention: bd->evacuated is /= 0 for a large object that has been
897 evacuated, or 0 otherwise.
898 -------------------------------------------------------------------------- */
901 evacuate_large(StgPtr p, rtsBool mutable)
903 bdescr *bd = Bdescr(p);
906 /* should point to the beginning of the block */
907 ASSERT(((W_)p & BLOCK_MASK) == 0);
909 /* already evacuated? */
911 /* Don't forget to set the failed_to_evac flag if we didn't get
912 * the desired destination (see comments in evacuate()).
914 if (bd->gen->no < evac_gen) {
915 failed_to_evac = rtsTrue;
916 TICK_GC_FAILED_PROMOTION();
922 /* remove from large_object list */
924 bd->back->link = bd->link;
925 } else { /* first object in the list */
926 step->large_objects = bd->link;
929 bd->link->back = bd->back;
932 /* link it on to the evacuated large object list of the destination step
935 if (step->gen->no < evac_gen) {
936 step = &generations[evac_gen].steps[0];
941 bd->link = step->new_large_objects;
942 step->new_large_objects = bd;
946 recordMutable((StgMutClosure *)p);
950 /* -----------------------------------------------------------------------------
951 Adding a MUT_CONS to an older generation.
953 This is necessary from time to time when we end up with an
954 old-to-new generation pointer in a non-mutable object. We defer
955 the promotion until the next GC.
956 -------------------------------------------------------------------------- */
959 mkMutCons(StgClosure *ptr, generation *gen)
964 step = &gen->steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
969 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
973 q = (StgMutVar *)step->hp;
974 step->hp += sizeofW(StgMutVar);
976 SET_HDR(q,&MUT_CONS_info,CCS_GC);
978 recordOldToNewPtrs((StgMutClosure *)q);
980 return (StgClosure *)q;
983 /* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.
988 The caller to evacuate specifies a desired generation in the
989 evac_gen global variable. The following conditions apply to
990 evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= evac_gen
       if  M > N     then not evacuated
       else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.
1006 -------------------------------------------------------------------------- */
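
/* An illustrative sketch (not part of the collector): the rules above
 * written out as straight-line code.  `bd' is the block descriptor of
 * the object q being evacuated; the names follow the real evacuate()
 * below, except that the promotion to evac_gen actually happens
 * inside copy().
 */
#if 0
bd = Bdescr((P_)q);
if (bd->gen->no > N) {              /* M > N: not evacuated */
    if (bd->gen->no < evac_gen) {   /* ...but we wanted it promoted */
        failed_to_evac = rtsTrue;
    }
} else {
    step = bd->step->to;            /* M <= N: evac to step->to */
    if (step->gen->no < evac_gen) { /* M < evac_gen: promote */
        step = &generations[evac_gen].steps[0];
    }
}
#endif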
1010 evacuate(StgClosure *q)
1015 const StgInfoTable *info;
1018 if (!LOOKS_LIKE_STATIC(q)) {
1020 if (bd->gen->no > N) {
1021 /* Can't evacuate this object, because it's in a generation
1022 * older than the ones we're collecting. Let's hope that it's
1023 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1025 if (bd->gen->no < evac_gen) {
1027 failed_to_evac = rtsTrue;
1028 TICK_GC_FAILED_PROMOTION();
1032 step = bd->step->to;
1035 /* make sure the info pointer is into text space */
1036 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1037 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1040 switch (info -> type) {
1043 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),step);
1048 ASSERT(q->header.info != &MUT_CONS_info);
1050 to = copy(q,sizeW_fromITBL(info),step);
1052 recordMutable((StgMutClosure *)to);
1056 stable_ptr_table[((StgStableName *)q)->sn].keep = rtsTrue;
1057 to = copy(q,sizeofW(StgStableName),step);
1065 to = copy(q,sizeofW(StgHeader)+1,step);
1069 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1080 to = copy(q,sizeofW(StgHeader)+2,step);
1088 case IND_OLDGEN_PERM:
1093 to = copy(q,sizeW_fromITBL(info),step);
1099 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1104 to = copy(q,BLACKHOLE_sizeW(),step);
1106 recordMutable((StgMutClosure *)to);
1109 case THUNK_SELECTOR:
1111 const StgInfoTable* selectee_info;
1112 StgClosure* selectee = ((StgSelector*)q)->selectee;
1115 selectee_info = get_itbl(selectee);
1116 switch (selectee_info->type) {
1125 StgNat32 offset = info->layout.selector_offset;
1127 /* check that the size is in range */
1129 (StgNat32)(selectee_info->layout.payload.ptrs +
1130 selectee_info->layout.payload.nptrs));
1132 /* perform the selection! */
1133 q = selectee->payload[offset];
1135 /* if we're already in to-space, there's no need to continue
1136 * with the evacuation, just update the source address with
1137 * a pointer to the (evacuated) constructor field.
1139 if (IS_USER_PTR(q)) {
1140 bdescr *bd = Bdescr((P_)q);
1141 if (bd->evacuated) {
1142 if (bd->gen->no < evac_gen) {
1143 failed_to_evac = rtsTrue;
1144 TICK_GC_FAILED_PROMOTION();
1150 /* otherwise, carry on and evacuate this constructor field,
1151 * (but not the constructor itself)
1160 case IND_OLDGEN_PERM:
1161 selectee = stgCast(StgInd *,selectee)->indirectee;
1165 selectee = stgCast(StgCAF *,selectee)->value;
1169 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1179 case THUNK_SELECTOR:
1180 /* aargh - do recursively???? */
1185 /* not evaluated yet */
1189 barf("evacuate: THUNK_SELECTOR: strange selectee");
1192 to = copy(q,THUNK_SELECTOR_sizeW(),step);
1198 /* follow chains of indirections, don't evacuate them */
1199 q = ((StgInd*)q)->indirectee;
1202 /* ToDo: optimise STATIC_LINK for known cases.
1203 - FUN_STATIC : payload[0]
1204 - THUNK_STATIC : payload[1]
1205 - IND_STATIC : payload[1]
1209 if (info->srt_len == 0) { /* small optimisation */
1215 /* don't want to evacuate these, but we do want to follow pointers
1216 * from SRTs - see scavenge_static.
1219 /* put the object on the static list, if necessary.
1221 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1222 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1223 static_objects = (StgClosure *)q;
1227 case CONSTR_INTLIKE:
1228 case CONSTR_CHARLIKE:
1229 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
1245 /* shouldn't see these */
1246 barf("evacuate: stack frame\n");
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
1252 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),step);
1257 /* Already evacuated, just return the forwarding address.
1258 * HOWEVER: if the requested destination generation (evac_gen) is
1259 * older than the actual generation (because the object was
1260 * already evacuated to a younger generation) then we have to
1261 * set the failed_to_evac flag to indicate that we couldn't
1262 * manage to promote the object to the desired generation.
1264 if (evac_gen > 0) { /* optimisation */
1265 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1266 if (Bdescr((P_)p)->gen->no < evac_gen) {
1267 /* fprintf(stderr,"evac failed!\n");*/
1268 failed_to_evac = rtsTrue;
1269 TICK_GC_FAILED_PROMOTION();
1272 return ((StgEvacuated*)q)->evacuee;
1277 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1279 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1280 evacuate_large((P_)q, rtsFalse);
1283 /* just copy the block */
1284 to = copy(q,size,step);
1291 case MUT_ARR_PTRS_FROZEN:
1293 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1295 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1296 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1299 /* just copy the block */
1300 to = copy(q,size,step);
1302 if (info->type == MUT_ARR_PTRS) {
1303 recordMutable((StgMutClosure *)to);
1311 StgTSO *tso = stgCast(StgTSO *,q);
1312 nat size = tso_sizeW(tso);
1315 /* Large TSOs don't get moved, so no relocation is required.
1317 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1318 evacuate_large((P_)q, rtsTrue);
      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
1325 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1327 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1329 /* relocate the stack pointers... */
1330 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1331 new_tso->sp = (StgPtr)new_tso->sp + diff;
1332 new_tso->splim = (StgPtr)new_tso->splim + diff;
1334 relocate_TSO(tso, new_tso);
1335 upd_evacuee(q,(StgClosure *)new_tso);
1337 recordMutable((StgMutClosure *)new_tso);
1338 return (StgClosure *)new_tso;
1344 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1348 barf("evacuate: strange closure type");
1354 /* -----------------------------------------------------------------------------
1355 relocate_TSO is called just after a TSO has been copied from src to
1356 dest. It adjusts the update frame list for the new location.
1357 -------------------------------------------------------------------------- */
1360 relocate_TSO(StgTSO *src, StgTSO *dest)
1367 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1371 while ((P_)su < dest->stack + dest->stack_size) {
1372 switch (get_itbl(su)->type) {
1374 /* GCC actually manages to common up these three cases! */
1377 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1382 cf = (StgCatchFrame *)su;
1383 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1388 sf = (StgSeqFrame *)su;
1389 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1398 barf("relocate_TSO");
1407 scavenge_srt(const StgInfoTable *info)
1409 StgClosure **srt, **srt_end;
1411 /* evacuate the SRT. If srt_len is zero, then there isn't an
1412 * srt field in the info table. That's ok, because we'll
1413 * never dereference it.
1415 srt = stgCast(StgClosure **,info->srt);
1416 srt_end = srt + info->srt_len;
1417 for (; srt < srt_end; srt++) {
1422 /* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
1437 scavenge(step *step)
1440 const StgInfoTable *info;
1442 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1447 failed_to_evac = rtsFalse;
  /* scavenge phase - standard breadth-first scavenging of the
   * to-space.
   */
1453 while (bd != step->hp_bd || p < step->hp) {
1455 /* If we're at the end of this block, move on to the next block */
1456 if (bd != step->hp_bd && p == bd->free) {
1462 q = p; /* save ptr to object */
1464 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1465 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1467 info = get_itbl((StgClosure *)p);
1468 switch (info -> type) {
1472 StgBCO* bco = stgCast(StgBCO*,p);
1474 for (i = 0; i < bco->n_ptrs; i++) {
1475 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1477 p += bco_sizeW(bco);
1482 /* treat MVars specially, because we don't want to evacuate the
1483 * mut_link field in the middle of the closure.
1486 StgMVar *mvar = ((StgMVar *)p);
1488 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1489 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1490 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1491 p += sizeofW(StgMVar);
1492 evac_gen = saved_evac_gen;
1500 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1501 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1502 p += sizeofW(StgHeader) + 2;
1507 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1508 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1514 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1515 p += sizeofW(StgHeader) + 1;
1520 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1526 p += sizeofW(StgHeader) + 1;
1533 p += sizeofW(StgHeader) + 2;
1540 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1541 p += sizeofW(StgHeader) + 2;
1554 case IND_OLDGEN_PERM:
1560 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1561 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1562 (StgClosure *)*p = evacuate((StgClosure *)*p);
1564 p += info->layout.payload.nptrs;
1569 /* ignore MUT_CONSs */
1570 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1572 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1573 evac_gen = saved_evac_gen;
1575 p += sizeofW(StgMutVar);
1580 p += BLACKHOLE_sizeW();
1585 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1586 (StgClosure *)bh->blocking_queue =
1587 evacuate((StgClosure *)bh->blocking_queue);
1588 if (failed_to_evac) {
1589 failed_to_evac = rtsFalse;
1590 recordMutable((StgMutClosure *)bh);
1592 p += BLACKHOLE_sizeW();
1596 case THUNK_SELECTOR:
1598 StgSelector *s = (StgSelector *)p;
1599 s->selectee = evacuate(s->selectee);
1600 p += THUNK_SELECTOR_sizeW();
1606 barf("scavenge:IND???\n");
1608 case CONSTR_INTLIKE:
1609 case CONSTR_CHARLIKE:
1611 case CONSTR_NOCAF_STATIC:
1615 /* Shouldn't see a static object here. */
1616 barf("scavenge: STATIC object\n");
1628 /* Shouldn't see stack frames here. */
1629 barf("scavenge: stack frame\n");
1631 case AP_UPD: /* same as PAPs */
1633 /* Treat a PAP just like a section of stack, not forgetting to
1634 * evacuate the function pointer too...
1637 StgPAP* pap = stgCast(StgPAP*,p);
1639 pap->fun = evacuate(pap->fun);
1640 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1641 p += pap_sizeW(pap);
1647 /* nothing to follow */
1648 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1652 /* follow everything */
1656 evac_gen = 0; /* repeatedly mutable */
1657 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1658 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1659 (StgClosure *)*p = evacuate((StgClosure *)*p);
1661 evac_gen = saved_evac_gen;
1665 case MUT_ARR_PTRS_FROZEN:
1666 /* follow everything */
1668 StgPtr start = p, next;
1670 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1671 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1672 (StgClosure *)*p = evacuate((StgClosure *)*p);
1674 if (failed_to_evac) {
1675 /* we can do this easier... */
1676 recordMutable((StgMutClosure *)start);
1677 failed_to_evac = rtsFalse;
1688 /* chase the link field for any TSOs on the same queue */
1689 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1690 /* scavenge this thread's stack */
1691 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1692 evac_gen = saved_evac_gen;
1693 p += tso_sizeW(tso);
1700 barf("scavenge: unimplemented/strange closure type\n");
1706 /* If we didn't manage to promote all the objects pointed to by
1707 * the current object, then we have to designate this object as
1708 * mutable (because it contains old-to-new generation pointers).
1710 if (failed_to_evac) {
1711 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1712 failed_to_evac = rtsFalse;
1720 /* -----------------------------------------------------------------------------
1721 Scavenge one object.
1723 This is used for objects that are temporarily marked as mutable
1724 because they contain old-to-new generation pointers. Only certain
1725 objects can have this property.
1726 -------------------------------------------------------------------------- */
1728 scavenge_one(StgClosure *p)
1733 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1734 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1738 switch (info -> type) {
1741 case FUN_1_0: /* hardly worth specialising these guys */
1761 case IND_OLDGEN_PERM:
1767 end = (P_)p->payload + info->layout.payload.ptrs;
1768 for (q = (P_)p->payload; q < end; q++) {
1769 (StgClosure *)*q = evacuate((StgClosure *)*q);
1778 case THUNK_SELECTOR:
1780 StgSelector *s = (StgSelector *)p;
1781 s->selectee = evacuate(s->selectee);
1785 case AP_UPD: /* same as PAPs */
1787 /* Treat a PAP just like a section of stack, not forgetting to
1788 * evacuate the function pointer too...
1791 StgPAP* pap = (StgPAP *)p;
1793 pap->fun = evacuate(pap->fun);
1794 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1799 /* This might happen if for instance a MUT_CONS was pointing to a
1800 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
1807 barf("scavenge_one: strange object");
1810 no_luck = failed_to_evac;
1811 failed_to_evac = rtsFalse;
1816 /* -----------------------------------------------------------------------------
1817 Scavenging mutable lists.
1819 We treat the mutable list of each generation > N (i.e. all the
1820 generations older than the one being collected) as roots. We also
1821 remove non-mutable objects from the mutable list at this point.
1822 -------------------------------------------------------------------------- */
1824 static StgMutClosure *
1825 scavenge_mut_once_list(StgMutClosure *p, nat gen)
1828 StgMutClosure *start;
1829 StgMutClosure **prev;
1835 failed_to_evac = rtsFalse;
1837 for (; p != END_MUT_LIST; p = *prev) {
1839 /* make sure the info pointer is into text space */
1840 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1841 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1844 switch(info->type) {
1847 case IND_OLDGEN_PERM:
1849 /* Try to pull the indirectee into this generation, so we can
1850 * remove the indirection from the mutable list.
1852 ((StgIndOldGen *)p)->indirectee =
1853 evacuate(((StgIndOldGen *)p)->indirectee);
1855 /* failed_to_evac might happen if we've got more than two
1856 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
1860 if (failed_to_evac) {
1861 failed_to_evac = rtsFalse;
1862 prev = &p->mut_link;
1864 *prev = p->mut_link;
1865 /* the mut_link field of an IND_STATIC is overloaded as the
1866 * static link field too (it just so happens that we don't need
1867 * both at the same time), so we need to NULL it out when
1868 * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
1881 ASSERT(p->header.info == &MUT_CONS_info);
1882 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
1883 /* didn't manage to promote everything, so leave the
1884 * MUT_CONS on the list.
1886 prev = &p->mut_link;
1888 *prev = p->mut_link;
1893 /* shouldn't have anything else on the mutables list */
1894 barf("scavenge_mut_once_list: strange object?");
1901 static StgMutClosure *
1902 scavenge_mutable_list(StgMutClosure *p, nat gen)
1905 StgMutClosure *start;
1906 StgMutClosure **prev;
1913 failed_to_evac = rtsFalse;
1915 for (; p != END_MUT_LIST; p = *prev) {
1917 /* make sure the info pointer is into text space */
1918 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1919 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1922 switch(info->type) {
1924 case MUT_ARR_PTRS_FROZEN:
1925 /* remove this guy from the mutable list, but follow the ptrs
1926 * anyway (and make sure they get promoted to this gen).
1931 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1933 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1934 (StgClosure *)*q = evacuate((StgClosure *)*q);
1938 if (failed_to_evac) {
1939 failed_to_evac = rtsFalse;
1940 prev = &p->mut_link;
1942 *prev = p->mut_link;
1948 /* follow everything */
1949 prev = &p->mut_link;
1953 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1954 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1955 (StgClosure *)*q = evacuate((StgClosure *)*q);
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
1965 ASSERT(p->header.info != &MUT_CONS_info);
1966 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1967 prev = &p->mut_link;
1972 StgMVar *mvar = (StgMVar *)p;
1973 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1974 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1975 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1976 prev = &p->mut_link;
1981 /* follow ptrs and remove this from the mutable list */
1983 StgTSO *tso = (StgTSO *)p;
1985 /* Don't bother scavenging if this thread is dead
1987 if (!(tso->whatNext == ThreadComplete ||
1988 tso->whatNext == ThreadKilled)) {
1989 /* Don't need to chase the link field for any TSOs on the
1990 * same queue. Just scavenge this thread's stack
1992 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1995 /* Don't take this TSO off the mutable list - it might still
       * point to some younger objects (because we set evac_gen to 0
       * above).
       */
1999 prev = &tso->mut_link;
2005 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2006 (StgClosure *)bh->blocking_queue =
2007 evacuate((StgClosure *)bh->blocking_queue);
2008 prev = &p->mut_link;
2013 /* shouldn't have anything else on the mutables list */
2014 barf("scavenge_mut_list: strange object?");
2021 scavenge_static(void)
2023 StgClosure* p = static_objects;
2024 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects.
   */
2028 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
   * list.
   */
2032 while (p != END_OF_STATIC_LIST) {
2036 /* make sure the info pointer is into text space */
2037 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2038 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2040 /* Take this object *off* the static_objects list,
2041 * and put it on the scavenged_static_objects list.
2043 static_objects = STATIC_LINK(info,p);
2044 STATIC_LINK(info,p) = scavenged_static_objects;
2045 scavenged_static_objects = p;
2047 switch (info -> type) {
2051 StgInd *ind = (StgInd *)p;
2052 ind->indirectee = evacuate(ind->indirectee);
2054 /* might fail to evacuate it, in which case we have to pop it
2055 * back on the mutable list (and take it off the
2056 * scavenged_static list because the static link and mut link
2057 * pointers are one and the same).
2059 if (failed_to_evac) {
2060 failed_to_evac = rtsFalse;
2061 scavenged_static_objects = STATIC_LINK(info,p);
2062 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2063 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2077 next = (P_)p->payload + info->layout.payload.ptrs;
2078 /* evacuate the pointers */
2079 for (q = (P_)p->payload; q < next; q++) {
2080 (StgClosure *)*q = evacuate((StgClosure *)*q);
2086 barf("scavenge_static");
2089 ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
2092 * be more stuff on this list now that we've done some evacuating!
2093 * (static_objects is a global)
2099 /* -----------------------------------------------------------------------------
2100 scavenge_stack walks over a section of stack and evacuates all the
2101 objects pointed to by it. We can use the same code for walking
2102 PAPs, since these are just sections of copied stack.
2103 -------------------------------------------------------------------------- */
2106 scavenge_stack(StgPtr p, StgPtr stack_end)
2109 const StgInfoTable* info;
2113 * Each time around this loop, we are looking at a chunk of stack
2114 * that starts with either a pending argument section or an
2115 * activation record.
2118 while (p < stack_end) {
2119 q = *stgCast(StgPtr*,p);
2121 /* If we've got a tag, skip over that many words on the stack */
2122 if (IS_ARG_TAG(stgCast(StgWord,q))) {
2127 /* Is q a pointer to a closure?
2129 if (! LOOKS_LIKE_GHC_INFO(q)) {
2132 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
2133 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
2135 /* otherwise, must be a pointer into the allocation space.
2139 (StgClosure *)*p = evacuate((StgClosure *)q);
2145 * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
2149 info = get_itbl(stgCast(StgClosure*,p));
2151 switch (info->type) {
2153 /* Dynamic bitmap: the mask is stored on the stack */
2155 bitmap = stgCast(StgRetDyn*,p)->liveness;
2156 p = &payloadWord(stgCast(StgRetDyn*,p),0);
2159 /* probably a slow-entry point return address: */
2165 /* Specialised code for update frames, since they're so common.
2166 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2167 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2171 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2173 StgClosureType type = get_itbl(frame->updatee)->type;
2175 p += sizeofW(StgUpdateFrame);
2176 if (type == EVACUATED) {
2177 frame->updatee = evacuate(frame->updatee);
2180 bdescr *bd = Bdescr((P_)frame->updatee);
2182 if (bd->gen->no > N) {
2183 if (bd->gen->no < evac_gen) {
2184 failed_to_evac = rtsTrue;
2188 step = bd->step->to;
2192 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2193 sizeofW(StgHeader), step);
2194 upd_evacuee(frame->updatee,to);
2195 frame->updatee = to;
2198 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2199 upd_evacuee(frame->updatee,to);
2200 frame->updatee = to;
2201 recordMutable((StgMutClosure *)to);
2204 barf("scavenge_stack: UPDATE_FRAME updatee");
2209 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2216 bitmap = info->layout.bitmap;
2219 while (bitmap != 0) {
2220 if ((bitmap & 1) == 0) {
2221 (StgClosure *)*p = evacuate((StgClosure *)*p);
2224 bitmap = bitmap >> 1;
2231 /* large bitmap (> 32 entries) */
2236 StgLargeBitmap *large_bitmap;
2239 large_bitmap = info->layout.large_bitmap;
2242 for (i=0; i<large_bitmap->size; i++) {
2243 bitmap = large_bitmap->bitmap[i];
2244 q = p + sizeof(W_) * 8;
2245 while (bitmap != 0) {
2246 if ((bitmap & 1) == 0) {
2247 (StgClosure *)*p = evacuate((StgClosure *)*p);
2250 bitmap = bitmap >> 1;
2252 if (i+1 < large_bitmap->size) {
2254 (StgClosure *)*p = evacuate((StgClosure *)*p);
2260 /* and don't forget to follow the SRT */
2265 barf("scavenge_stack: weird activation record found on stack.\n");
2270 /*-----------------------------------------------------------------------------
2271 scavenge the large object list.
2273 evac_gen set by caller; similar games played with evac_gen as with
2274 scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
2277 --------------------------------------------------------------------------- */
2280 scavenge_large(step *step)
2284 const StgInfoTable* info;
2285 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2287 evac_gen = 0; /* most objects are mutable */
2288 bd = step->new_large_objects;
2290 for (; bd != NULL; bd = step->new_large_objects) {
2292 /* take this object *off* the large objects list and put it on
2293 * the scavenged large objects list. This is so that we can
2294 * treat new_large_objects as a stack and push new objects on
2295 * the front when evacuating.
2297 step->new_large_objects = bd->link;
2298 dbl_link_onto(bd, &step->scavenged_large_objects);
2301 info = get_itbl(stgCast(StgClosure*,p));
2303 switch (info->type) {
2305 /* only certain objects can be "large"... */
2309 /* nothing to follow */
2313 /* follow everything */
2317 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2318 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2319 (StgClosure *)*p = evacuate((StgClosure *)*p);
2324 case MUT_ARR_PTRS_FROZEN:
2325 /* follow everything */
2327 StgPtr start = p, next;
2329 evac_gen = saved_evac_gen; /* not really mutable */
2330 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2331 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2332 (StgClosure *)*p = evacuate((StgClosure *)*p);
2335 if (failed_to_evac) {
2336 recordMutable((StgMutClosure *)start);
2343 StgBCO* bco = stgCast(StgBCO*,p);
2345 evac_gen = saved_evac_gen;
2346 for (i = 0; i < bco->n_ptrs; i++) {
2347 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2358 /* chase the link field for any TSOs on the same queue */
2359 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2360 /* scavenge this thread's stack */
2361 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2366 barf("scavenge_large: unknown/strange object");
2372 zeroStaticObjectList(StgClosure* first_static)
2376 const StgInfoTable *info;
2378 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2380 link = STATIC_LINK(info, p);
2381 STATIC_LINK(info,p) = NULL;
2385 /* This function is only needed because we share the mutable link
2386 * field with the static link field in an IND_STATIC, so we have to
2387 * zero the mut_link field before doing a major GC, which needs the
2388 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
2394 zeroMutableList(StgMutClosure *first)
2396 StgMutClosure *next, *c;
2398 for (c = first; c != END_MUT_LIST; c = next) {
2404 /* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
2408 void RevertCAFs(void)
2410 while (enteredCAFs != END_CAF_LIST) {
2411 StgCAF* caf = enteredCAFs;
2413 enteredCAFs = caf->link;
2414 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2415 SET_INFO(caf,&CAF_UNENTERED_info);
2416 caf->value = stgCast(StgClosure*,0xdeadbeef);
2417 caf->link = stgCast(StgCAF*,0xdeadbeef);
2421 void revertDeadCAFs(void)
2423 StgCAF* caf = enteredCAFs;
2424 enteredCAFs = END_CAF_LIST;
2425 while (caf != END_CAF_LIST) {
2426 StgCAF* next = caf->link;
2428 switch(GET_INFO(caf)->type) {
2431 /* This object has been evacuated, it must be live. */
2432 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2433 new->link = enteredCAFs;
2439 SET_INFO(caf,&CAF_UNENTERED_info);
2440 caf->value = stgCast(StgClosure*,0xdeadbeef);
2441 caf->link = stgCast(StgCAF*,0xdeadbeef);
2445 barf("revertDeadCAFs: enteredCAFs list corrupted");
2451 /* -----------------------------------------------------------------------------
2452 Sanity code for CAF garbage collection.
2454 With DEBUG turned on, we manage a CAF list in addition to the SRT
2455 mechanism. After GC, we run down the CAF list and blackhole any
2456 CAFs which have been garbage collected. This means we get an error
2457 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
2461 -------------------------------------------------------------------------- */
2469 const StgInfoTable *info;
2480 ASSERT(info->type == IND_STATIC);
2482 if (STATIC_LINK(info,p) == NULL) {
2483 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
2485 SET_INFO(p,&BLACKHOLE_info);
2486 p = STATIC_LINK2(info,p);
2490 pp = &STATIC_LINK2(info,p);
2497 /* fprintf(stderr, "%d CAFs live\n", i); */
2501 /* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
2505 some work, we have to run down the stack and black-hole all the
2506 closures referred to by update frames.
2507 -------------------------------------------------------------------------- */
2510 threadLazyBlackHole(StgTSO *tso)
2512 StgUpdateFrame *update_frame;
2513 StgBlockingQueue *bh;
2516 stack_end = &tso->stack[tso->stack_size];
2517 update_frame = tso->su;
2520 switch (get_itbl(update_frame)->type) {
2523 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2527 bh = (StgBlockingQueue *)update_frame->updatee;
2529 /* if the thunk is already blackholed, it means we've also
2530 * already blackholed the rest of the thunks on this stack,
2531 * so we can stop early.
2533 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
2534 * don't interfere with this optimisation.
2536 if (bh->header.info == &BLACKHOLE_info) {
2540 if (bh->header.info != &BLACKHOLE_BQ_info &&
2541 bh->header.info != &CAF_BLACKHOLE_info) {
2542 SET_INFO(bh,&BLACKHOLE_info);
2545 update_frame = update_frame->link;
2549 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2555 barf("threadPaused");
2560 /* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
2564 * lazy black holing here.
2566 * -------------------------------------------------------------------------- */
2569 threadSqueezeStack(StgTSO *tso)
2571 lnat displacement = 0;
2572 StgUpdateFrame *frame;
2573 StgUpdateFrame *next_frame; /* Temporally next */
2574 StgUpdateFrame *prev_frame; /* Temporally previous */
2576 rtsBool prev_was_update_frame;
2578 bottom = &(tso->stack[tso->stack_size]);
2581 /* There must be at least one frame, namely the STOP_FRAME.
2583 ASSERT((P_)frame < bottom);
2585 /* Walk down the stack, reversing the links between frames so that
2586 * we can walk back up as we squeeze from the bottom. Note that
2587 * next_frame and prev_frame refer to next and previous as they were
2588 * added to the stack, rather than the way we see them in this
2589 * walk. (It makes the next loop less confusing.)
2591 * Stop if we find an update frame pointing to a black hole
2592 * (see comment in threadLazyBlackHole()).
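   *
   * An illustrative picture (not from the original comment), for a
   * stack with update frames F1 (topmost, most recent) down to F3
   * (bottom-most, oldest):
   *
   *    before:  F1.link -> F2,  F2.link -> F3
   *    after:   F3.link -> F2,  F2.link -> F1
   *
   * so the squeezing loop below can walk from the bottom frame back
   * up via the reversed links.
   */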
2596 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2597 prev_frame = frame->link;
2598 frame->link = next_frame;
2601 if (get_itbl(frame)->type == UPDATE_FRAME
2602 && frame->updatee->header.info == &BLACKHOLE_info) {
2607 /* Now, we're at the bottom. Frame points to the lowest update
2608 * frame on the stack, and its link actually points to the frame
2609 * above. We have to walk back up the stack, squeezing out empty
2610 * update frames and turning the pointers back around on the way
2613 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2614 * we never want to eliminate it anyway. Just walk one step up
2615 * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved as well.
   */
2623 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2626 * Loop through all of the frames (everything except the very
2627 * bottom). Things are complicated by the fact that we have
2628 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2629 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2631 while (frame != NULL) {
2633 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2634 rtsBool is_update_frame;
2636 next_frame = frame->link;
2637 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
      /* We can squeeze out the current frame if:
       *   1. both the previous and current frame are update frames
       *   2. the current frame is empty
       */
2643 if (prev_was_update_frame && is_update_frame &&
2644 (P_)prev_frame == frame_bottom + displacement) {
2646 /* Now squeeze out the current frame */
2647 StgClosure *updatee_keep = prev_frame->updatee;
2648 StgClosure *updatee_bypass = frame->updatee;
2651 fprintf(stderr, "squeezing frame at %p\n", frame);
2654 /* Deal with blocking queues. If both updatees have blocked
2655 * threads, then we should merge the queues into the update
2656 * frame that we're keeping.
2658 * Alternatively, we could just wake them up: they'll just go
2659 * straight to sleep on the proper blackhole! This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
2663 #if 0 /* do it properly... */
2664 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2665 /* Sigh. It has one. Don't lose those threads! */
2666 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2667 /* Urgh. Two queues. Merge them. */
2668 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2670 while (keep_tso->link != END_TSO_QUEUE) {
2671 keep_tso = keep_tso->link;
2673 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2676 /* For simplicity, just swap the BQ for the BH */
2677 P_ temp = updatee_keep;
2679 updatee_keep = updatee_bypass;
2680 updatee_bypass = temp;
2682 /* Record the swap in the kept frame (below) */
2683 prev_frame->updatee = updatee_keep;
2688 TICK_UPD_SQUEEZED();
2689 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2691 sp = (P_)frame - 1; /* sp = stuff to slide */
2692 displacement += sizeofW(StgUpdateFrame);
2695 /* No squeeze for this frame */
2696 sp = frame_bottom - 1; /* Keep the current frame */
2698 /* Do lazy black-holing.
2700 if (is_update_frame) {
2701 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
2702 if (bh->header.info != &BLACKHOLE_BQ_info &&
2703 bh->header.info != &CAF_BLACKHOLE_info) {
2704 SET_INFO(bh,&BLACKHOLE_info);
2708 /* Fix the link in the current frame (should point to the frame below) */
2709 frame->link = prev_frame;
2710 prev_was_update_frame = is_update_frame;
2713 /* Now slide all words from sp up to the next frame */
2715 if (displacement > 0) {
2716 P_ next_frame_bottom;
2718 if (next_frame != NULL)
2719 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2721 next_frame_bottom = tso->sp - 1;
2724 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
2728 while (sp >= next_frame_bottom) {
2729 sp[displacement] = *sp;
2733 (P_)prev_frame = (P_)frame + displacement;
2737 tso->sp += displacement;
2738 tso->su = prev_frame;
2741 /* -----------------------------------------------------------------------------
 * threadPaused
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
2747 * -------------------------------------------------------------------------- */
2750 threadPaused(StgTSO *tso)
2752 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2753 threadSqueezeStack(tso); /* does black holing too */
2755 threadLazyBlackHole(tso);