1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.15 1999/01/19 17:06:02 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
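
/* A minimal illustrative sketch (not part of the original file) of how an
 * object gets consed onto static_objects during evacuation; the real code
 * is inlined in evacuate() below.  A zero static link field means "not on
 * the list yet", which is why the end of the list is marked with 1 rather
 * than NULL.
 */
#if 0 /* sketch only */
static void push_static_object(const StgInfoTable *info, StgClosure *p)
{
  if (STATIC_LINK(info,p) == NULL) {    /* not already on the list */
    STATIC_LINK(info,p) = static_objects;
    static_objects = p;
  }
}
#endif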
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
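
/* Worked example (illustrative, not in the original comments): when
 * collecting N = 1, a mutable object in generation 2 is scavenged with
 * evac_gen = 2.  If one of its fields points to an object that has
 * already been evacuated to generation 1, evacuate() cannot promote it
 * any further, so it sets failed_to_evac and the enclosing object stays
 * on (or is added to) a mutable list for the next GC.
 */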
/* Old to-space (used for two-space collector only)
 */
static bdescr *old_to_space;
91 /* -----------------------------------------------------------------------------
92 Static function declarations
93 -------------------------------------------------------------------------- */
95 static StgClosure *evacuate(StgClosure *q);
96 static void zeroStaticObjectList(StgClosure* first_static);
97 static rtsBool traverse_weak_ptr_list(void);
98 static void zeroMutableList(StgMutClosure *first);
99 static void revertDeadCAFs(void);
101 static void scavenge_stack(StgPtr p, StgPtr stack_end);
102 static void scavenge_large(step *step);
103 static void scavenge(step *step);
104 static void scavenge_static(void);
105 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
108 static void gcCAFs(void);
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):
   - follow all pointers in the root set.  The root set includes all
117 mutable objects in all steps in all generations.
119 - for each pointer, evacuate the object it points to into either
120 + to-space in the next higher step in that generation, if one exists,
     + if the object's generation == N, then evacuate it to the next
       generation if one exists, or else to to-space in the current
       step.
124 + if the object's generation < N, then evacuate it to to-space
125 in the next generation.
127 - repeatedly scavenge to-space from each step in each generation
128 being collected until no more objects can be evacuated.
130 - free from-space in each step, and set from-space = to-space.
132 -------------------------------------------------------------------------- */
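
/* An illustrative sketch (not in the original) of the destination rule
 * above, mirroring the logic inlined in copy() below; it assumes each
 * step's "to" field points at its to-space destination step, as the
 * comment in copy() describes.
 */
#if 0 /* sketch only */
static step *
evac_dest_sketch(StgClosure *src)
{
  step *stp = Bdescr((P_)src)->step->to;  /* next step/generation */
  if (stp->gen->no < evac_gen) {          /* not old enough: promote */
    stp = &generations[evac_gen].steps[0];
  }
  return stp;
}
#endif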
134 void GarbageCollect(void (*get_roots)(void))
138 lnat live, allocated, collected = 0;
142 CostCentreStack *prev_CCS;
  /* tell the stats department that we've started a GC */
  stat_startGC();
148 /* attribute any costs to CCS_GC */
154 /* We might have been called from Haskell land by _ccall_GC, in
155 * which case we need to call threadPaused() because the scheduler
156 * won't have done it.
158 if (CurrentTSO) { threadPaused(CurrentTSO); }
160 /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
162 * This leaves a little slop at the end of each block, and doesn't
163 * take into account large objects (ToDo).
165 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
166 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
167 allocated -= BLOCK_SIZE_W;
  /* Figure out which generation to collect
   */
  N = 0;
173 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
178 major_gc = (N == RtsFlags.GcFlags.generations-1);
180 /* check stack sanity *before* GC (ToDo: check all threads) */
181 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
182 IF_DEBUG(sanity, checkFreeListSanity());
184 /* Initialise the static object lists
186 static_objects = END_OF_STATIC_LIST;
187 scavenged_static_objects = END_OF_STATIC_LIST;
189 /* zero the mutable list for the oldest generation (see comment by
190 * zeroMutableList below).
193 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
196 /* Save the old to-space if we're doing a two-space collection
198 if (RtsFlags.GcFlags.generations == 1) {
199 old_to_space = g0s0->to_space;
200 g0s0->to_space = NULL;
203 /* Initialise to-space in all the generations/steps that we're
206 for (g = 0; g <= N; g++) {
207 generations[g].mut_list = END_MUT_LIST;
209 for (s = 0; s < generations[g].n_steps; s++) {
211 /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }
216 /* Get a free block for to-space. Extra blocks will be chained on
220 step = &generations[g].steps[s];
221 ASSERT(step->gen->no == g);
222 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen  = &generations[g];
      bd->step = step;
226 bd->evacuated = 1; /* it's a to-space block */
227 step->hp = bd->start;
228 step->hpLim = step->hp + BLOCK_SIZE_W;
231 step->to_blocks = 1; /* ???? */
232 step->scan = bd->start;
234 step->new_large_objects = NULL;
235 step->scavenged_large_objects = NULL;
236 /* mark the large objects as not evacuated yet */
237 for (bd = step->large_objects; bd; bd = bd->link) {
243 /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
246 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
247 for (s = 0; s < generations[g].n_steps; s++) {
248 step = &generations[g].steps[s];
249 if (step->hp_bd == NULL) {
        bd->gen  = &generations[g];
        bd->step = step;
254 bd->evacuated = 0; /* *not* a to-space block */
255 step->hp = bd->start;
256 step->hpLim = step->hp + BLOCK_SIZE_W;
261 /* Set the scan pointer for older generations: remember we
262 * still have to scavenge objects that have been promoted. */
263 step->scan = step->hp;
264 step->scan_bd = step->hp_bd;
265 step->to_space = NULL;
267 step->new_large_objects = NULL;
268 step->scavenged_large_objects = NULL;
272 /* -----------------------------------------------------------------------
273 * follow all the roots that we know about:
274 * - mutable lists from each generation > N
275 * we want to *scavenge* these roots, not evacuate them: they're not
276 * going to move in this GC.
277 * Also: do them in reverse generation order. This is because we
278 * often want to promote objects that are pointed to by older
279 * generations early, so we don't have to repeatedly copy them.
280 * Doing the generations in reverse order ensures that we don't end
281 * up in the situation where we want to evac an object to gen 3 and
282 * it has already been evaced to gen 2.
285 StgMutClosure *tmp, **pp;
286 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
287 generations[g].saved_mut_list = generations[g].mut_list;
288 generations[g].mut_list = END_MUT_LIST;
291 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
292 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
293 pp = &generations[g].mut_list;
294 while (*pp != END_MUT_LIST) {
        pp = &(*pp)->mut_link;
      }
      *pp = tmp;
  /* follow all the roots that the application knows about.
   */
  get_roots();
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell!  (_ccall_GC)
   */
309 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
316 old_weak_ptr_list = weak_ptr_list;
317 weak_ptr_list = NULL;
318 weak_done = rtsFalse;
322 /* ToDo: To fix the caf leak, we need to make the commented out
323 * parts of this code do something sensible - as described in
326 extern void markHugsObjects(void);
328 /* ToDo: This (undefined) function should contain the scavenge
329 * loop immediately below this block of code - but I'm not sure
330 * enough of the details to do this myself.
332 scavengeEverything();
333 /* revert dead CAFs and update enteredCAFs list */
338 /* This will keep the CAFs and the attached BCOs alive
339 * but the values will have been reverted
341 scavengeEverything();
346 /* -------------------------------------------------------------------------
347 * Repeatedly scavenge all the areas we know about until there's no
348 * more scavenging to be done.
loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    scavenge_static();
  }
360 /* When scavenging the older generations: Objects may have been
361 * evacuated from generations <= N into older generations, and we
362 * need to scavenge these objects. We're going to try to ensure that
363 * any evacuations that occur move the objects into at least the
364 * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */
369 /* scavenge each step in generations 0..maxgen */
372 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
373 for (s = 0; s < generations[gen].n_steps; s++) {
374 step = &generations[gen].steps[s];
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
        scavenge(step);
        flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
        scavenge_large(step);
        flag = rtsTrue;
      }
387 if (flag) { goto loop; }
389 /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
395 /* Set the maximum blocks for the oldest generation, based on twice
396 * the amount of live data now, adjusted to fit the maximum heap
399 * This is an approximation, since in the worst case we'll need
400 * twice the amount of live data plus whatever space the other
403 if (RtsFlags.GcFlags.generations > 1) {
405 oldest_gen->max_blocks =
406 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
407 RtsFlags.GcFlags.minOldGenSize);
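    /* Worked example (illustrative numbers only): with oldGenFactor = 2,
     * minOldGenSize = 4 and 300 blocks of live old-generation data this
     * gives stg_max(600, 4) = 600 blocks; with maxHeapSize = 1000 it is
     * then clipped to 500 by the test below.
     */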
408 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
409 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
           (int)oldest_gen->steps[0].to_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
        heapOverflow();
      }
418 /* For a two-space collector, we need to resize the nursery. */
420 /* set up a new nursery. Allocate a nursery size based on a
421 * function of the amount of live data (currently a factor of 2,
422 * should be configurable (ToDo)). Use the blocks from the old
423 * nursery if possible, freeing up any left over blocks.
425 * If we get near the maximum heap size, then adjust our nursery
426 * size accordingly. If the nursery is the same size as the live
427 * data (L), then we need 3L bytes. We can reduce the size of the
428 * nursery to bring the required memory down near 2L bytes.
430 * A normal 2-space collector would need 4L bytes to give the same
431 * performance we get from 3L bytes, reducing to the same
432 * performance at 2L bytes.
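   *
   * Worked example (illustrative numbers only): with maxHeapSize = 1000
   * blocks and to_blocks = 300 live blocks, 300 * 4 > 1000, so
   *
   *     adjusted_blocks = 1000 - 2*300     = 400
   *     pc_free         = 400 * 100 / 1000 = 40%
   *
   * and the nursery gets 400 blocks: the nursery (400) plus two copies
   * of the live data (600) just fit the maximum heap, i.e. roughly 2L
   * rather than 3L.
   */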
434 nat blocks = g0s0->to_blocks;
436 if ( blocks * 4 > RtsFlags.GcFlags.maxHeapSize ) {
437 int adjusted_blocks; /* signed on purpose */
440 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
441 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
442 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
443 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
446 blocks = adjusted_blocks;
450 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
451 blocks = RtsFlags.GcFlags.minAllocAreaSize;
455 if (nursery_blocks < blocks) {
      IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
                           blocks));
      g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
    } else {
      IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
                           blocks));
464 for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
472 g0s0->n_blocks = nursery_blocks = blocks;
475 /* run through all the generations/steps and tidy up
477 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
480 generations[g].collections++; /* for stats */
483 for (s = 0; s < generations[g].n_steps; s++) {
485 step = &generations[g].steps[s];
487 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
488 /* Tidy the end of the to-space chains */
489 step->hp_bd->free = step->hp;
490 step->hp_bd->link = NULL;
493 /* for generations we collected... */
496 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
498 /* free old memory and shift to-space into from-space for all
499 * the collected steps (except the allocation area). These
         * freed blocks will probably be quickly recycled.
502 if (!(g == 0 && s == 0)) {
503 freeChain(step->blocks);
504 step->blocks = step->to_space;
505 step->n_blocks = step->to_blocks;
506 step->to_space = NULL;
508 for (bd = step->blocks; bd != NULL; bd = bd->link) {
509 bd->evacuated = 0; /* now from-space */
513 /* LARGE OBJECTS. The current live large objects are chained on
514 * scavenged_large, having been moved during garbage
515 * collection from large_objects. Any objects left on
516 * large_objects list are therefore dead, so we free them here.
518 for (bd = step->large_objects; bd != NULL; bd = next) {
523 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
526 step->large_objects = step->scavenged_large_objects;
528 /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest generations.
         *
         * max_blocks =  alloc_area_size +
         *                  (oldgen_max_blocks - alloc_area_size) * G
         *                  -----------------------------------------
         *                        (number of generations - 1)
         */
538 generations[g].max_blocks =
539 RtsFlags.GcFlags.minAllocAreaSize +
540 (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
541 / (RtsFlags.GcFlags.generations-1));
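        /* Worked example (illustrative numbers only): with
         * minAllocAreaSize = 128, an oldest-generation maximum of 1024
         * blocks and 4 generations, generation 1 gets
         * 128 + (1024-128)*1/3 = 426 blocks and generation 2 gets
         * 128 + (1024-128)*2/3 = 725 blocks.
         */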
544 /* for older generations... */
547 /* For older generations, we need to append the
548 * scavenged_large_object list (i.e. large objects that have been
549 * promoted during this GC) to the large_object list for that step.
551 for (bd = step->scavenged_large_objects; bd; bd = next) {
554 dbl_link_onto(bd, &step->large_objects);
557 /* add the new blocks we promoted during this GC */
558 step->n_blocks += step->to_blocks;
563 /* Two-space collector:
564 * Free the old to-space, and estimate the amount of live data.
566 if (RtsFlags.GcFlags.generations == 1) {
567 if (old_to_space != NULL) {
568 freeChain(old_to_space);
570 live = g0s0->to_blocks * BLOCK_SIZE_W +
571 ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
573 /* Generational collector:
574 * estimate the amount of live data.
578 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
579 for (s = 0; s < generations[g].n_steps; s++) {
580 /* approximate amount of live data (doesn't take into account slop
        * at end of each block).  ToDo: do this more accurately.
        */
583 if (g == 0 && s == 0) { continue; }
584 step = &generations[g].steps[s];
585 live += step->n_blocks * BLOCK_SIZE_W +
586 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();
594 /* mark the garbage collected CAFs as dead */
596 if (major_gc) { gcCAFs(); }
599 /* zero the scavenged static object list */
601 zeroStaticObjectList(scavenged_static_objects);
606 for (bd = g0s0->blocks; bd; bd = bd->link) {
607 bd->free = bd->start;
608 ASSERT(bd->gen == g0);
609 ASSERT(bd->step == g0s0);
611 current_nursery = g0s0->blocks;
613 /* Free the small objects allocated via allocate(), since this will
614 * all have been copied into G0S1 now.
616 if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
619 small_alloc_list = NULL;
621 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
623 /* start any pending finalisers */
624 scheduleFinalisers(old_weak_ptr_list);
626 /* check sanity after GC */
628 if (RtsFlags.GcFlags.generations == 1) {
629 IF_DEBUG(sanity, checkHeap(g0s0->to_space, NULL));
630 IF_DEBUG(sanity, checkChain(g0s0->large_objects));
633 for (g = 0; g <= N; g++) {
634 for (s = 0; s < generations[g].n_steps; s++) {
635 if (g == 0 && s == 0) { continue; }
636 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
639 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
640 for (s = 0; s < generations[g].n_steps; s++) {
641 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
642 generations[g].steps[s].blocks->start));
643 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
646 IF_DEBUG(sanity, checkFreeListSanity());
650 IF_DEBUG(gc, stat_describe_gens());
653 /* symbol-table based profiling */
654 /* heapCensus(to_space); */ /* ToDo */
657 /* restore enclosing cost centre */
662 /* check for memory leaks if sanity checking is on */
663 IF_DEBUG(sanity, memInventory());
665 /* ok, GC over: tell the stats department what happened. */
666 stat_endGC(allocated, collected, live, N);
/* -----------------------------------------------------------------------------
   Weak pointers

   traverse_weak_ptr_list is called possibly many times during garbage
673 collection. It returns a flag indicating whether it did any work
674 (i.e. called evacuate on any live pointers).
676 Invariant: traverse_weak_ptr_list is called when the heap is in an
677 idempotent state. That means that there are no pending
678 evacuate/scavenge operations. This invariant helps the weak
679 pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.
683 For generational GC: we just don't try to finalise weak pointers in
684 older generations than the one we're collecting. This could
685 probably be optimised by keeping per-generation lists of weak
686 pointers, but for a few weak pointers this scheme will work.
687 -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *target;
  const StgInfoTable *info;
695 rtsBool flag = rtsFalse;
697 if (weak_done) { return rtsFalse; }
699 /* doesn't matter where we evacuate values/finalisers to, since
700 * these pointers are treated as roots (iff the keys are alive).
704 last_w = &old_weak_ptr_list;
705 for (w = old_weak_ptr_list; w; w = next_w) {
708 /* ignore weak pointers in older generations */
709 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
710 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w  = w->link;

      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      continue;
721 info = get_itbl(target);
722 switch (info->type) {
727 case IND_OLDGEN: /* rely on compatible layout with StgInd */
728 case IND_OLDGEN_PERM:
729 /* follow indirections */
730 target = ((StgInd *)target)->indirectee;
734 /* If key is alive, evacuate value and finaliser and
735 * place weak ptr on new weak ptr list.
737 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
738 w->key = ((StgEvacuated *)target)->evacuee;
739 w->value = evacuate(w->value);
740 w->finaliser = evacuate(w->finaliser);
        /* remove this weak ptr from the old_weak_ptr list */
        *last_w = w->link;
        next_w  = w->link;

        /* and put it on the new weak ptr list */
        w->link = weak_ptr_list;
        weak_ptr_list = w;

        flag = rtsTrue;
        break;
752 default: /* key is dead */
759 /* If we didn't make any changes, then we can go round and kill all
760 * the dead weak pointers. The old_weak_ptr list is used as a list
761 * of pending finalisers later on.
763 if (flag == rtsFalse) {
764 for (w = old_weak_ptr_list; w; w = w->link) {
765 w->value = evacuate(w->value);
766 w->finaliser = evacuate(w->finaliser);
StgClosure *
MarkRoot(StgClosure *root)
{
  root = evacuate(root);
  return root;
}
static inline void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen  = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;          /* it's a to-space block */
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
}
801 static __inline__ StgClosure *
copy(StgClosure *src, nat size, bdescr *bd)
{
  StgPtr to, from, dest;
  step *step;
807 /* Find out where we're going, using the handy "to" pointer in
808 * the step of the source object. If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  step = bd->step->to;
813 if (step->gen->no < evac_gen) {
814 step = &generations[evac_gen].steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size;

  for(to = dest, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }
829 return (StgClosure *)dest;
832 /* Special version of copy() for when we only want to copy the info
833 * pointer of an object, but reserve some padding after it. This is
834 * used to optimise evacuation of BLACKHOLEs.
837 static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
{
  StgPtr to, from, dest;
  step *step;

  step = bd->step->to;
844 if (step->gen->no < evac_gen) {
845 step = &generations[evac_gen].steps[0];
  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  for(to = dest, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }
858 return (StgClosure *)dest;
861 static __inline__ void
862 upd_evacuee(StgClosure *p, StgClosure *dest)
{
  StgEvacuated *q = (StgEvacuated *)p;

  SET_INFO(q,&EVACUATED_info);
  q->evacuee = dest;
}
870 /* -----------------------------------------------------------------------------
871 Evacuate a mutable object
873 If we evacuate a mutable object to an old generation, cons the
874 object onto the older generation's mutable list.
875 -------------------------------------------------------------------------- */
static void
evacuate_mutable(StgMutClosure *c)
{
  bdescr *bd;

  bd = Bdescr((P_)c);
  if (bd->gen->no > 0) {
    c->mut_link = bd->gen->mut_list;
    bd->gen->mut_list = c;
  }
}
889 /* -----------------------------------------------------------------------------
890 Evacuate a large object
892 This just consists of removing the object from the (doubly-linked)
893 large_alloc_list, and linking it on to the (singly-linked)
894 new_large_objects list, from where it will be scavenged later.
896 Convention: bd->evacuated is /= 0 for a large object that has been
897 evacuated, or 0 otherwise.
898 -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p, rtsBool mutable)
{
903 bdescr *bd = Bdescr(p);
906 /* should point to the beginning of the block */
907 ASSERT(((W_)p & BLOCK_MASK) == 0);
909 /* already evacuated? */
911 /* Don't forget to set the failed_to_evac flag if we didn't get
912 * the desired destination (see comments in evacuate()).
914 if (bd->gen->no < evac_gen) {
915 failed_to_evac = rtsTrue;
921 /* remove from large_object list */
923 bd->back->link = bd->link;
924 } else { /* first object in the list */
925 step->large_objects = bd->link;
928 bd->link->back = bd->back;
931 /* link it on to the evacuated large object list of the destination step
934 if (step->gen->no < evac_gen) {
935 step = &generations[evac_gen].steps[0];
940 bd->link = step->new_large_objects;
941 step->new_large_objects = bd;
945 evacuate_mutable((StgMutClosure *)p);
949 /* -----------------------------------------------------------------------------
950 Adding a MUT_CONS to an older generation.
952 This is necessary from time to time when we end up with an
953 old-to-new generation pointer in a non-mutable object. We defer
954 the promotion until the next GC.
955 -------------------------------------------------------------------------- */
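
/* Example scenario (illustrative, not in the original comments): while
 * scavenging generation 2 we find a field that can only be evacuated as
 * far as generation 1, because the target had already been moved there.
 * failed_to_evac gets set, and mkMutCons() plants a MUT_CONS on
 * generation 2's mutable list so that the pointer is re-examined, and
 * the target promoted, at the next GC.
 */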
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }
972 q = (StgMutVar *)step->hp;
973 step->hp += sizeofW(StgMutVar);
  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
977 evacuate_mutable((StgMutClosure *)q);
979 return (StgClosure *)q;
/* -----------------------------------------------------------------------------
   evacuate

   This is called (eventually) for every live object in the system.
987 The caller to evacuate specifies a desired generation in the
988 evac_gen global variable. The following conditions apply to
989 evacuating an object which resides in generation M when we're
   collecting up to generation N:

       if M >= evac_gen
           if M > N      do nothing
           else          evac to step->to
       if M < evac_gen   evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it was evacuated to:

       if M >= evac_gen  do nothing
       if M < evac_gen   set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.
1005 -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
1019 /* Can't evacuate this object, because it's in a generation
1020 * older than the ones we're collecting. Let's hope that it's
1021 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1023 if (bd->gen->no < evac_gen) {
1025 failed_to_evac = rtsTrue;
1031 /* make sure the info pointer is into text space */
1032 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1033 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  switch (info -> type) {
1039 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
1045 to = copy(q,sizeW_fromITBL(info),bd);
1047 evacuate_mutable((StgMutClosure *)to);
1054 case IND_OLDGEN_PERM:
1059 to = copy(q,sizeW_fromITBL(info),bd);
1065 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
1070 to = copy(q,BLACKHOLE_sizeW(),bd);
1072 evacuate_mutable((StgMutClosure *)to);
1075 case THUNK_SELECTOR:
1077 const StgInfoTable* selectee_info;
1078 StgClosure* selectee = ((StgSelector*)q)->selectee;
1081 selectee_info = get_itbl(selectee);
1082 switch (selectee_info->type) {
1086 StgNat32 offset = info->layout.selector_offset;
1088 /* check that the size is in range */
          ASSERT(offset <
                 (StgNat32)(selectee_info->layout.payload.ptrs +
                            selectee_info->layout.payload.nptrs));
1093 /* perform the selection! */
1094 q = selectee->payload[offset];
1096 /* if we're already in to-space, there's no need to continue
1097 * with the evacuation, just update the source address with
1098 * a pointer to the (evacuated) constructor field.
1100 if (IS_USER_PTR(q)) {
1101 bdescr *bd = Bdescr((P_)q);
1102 if (bd->evacuated) {
1103 if (bd->gen->no < evac_gen) {
1104 failed_to_evac = rtsTrue;
1110 /* otherwise, carry on and evacuate this constructor field,
1111 * (but not the constructor itself)
1120 case IND_OLDGEN_PERM:
1121 selectee = stgCast(StgInd *,selectee)->indirectee;
1125 selectee = stgCast(StgCAF *,selectee)->value;
1129 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1134 case THUNK_SELECTOR:
1135 /* aargh - do recursively???? */
1140 /* not evaluated yet */
1144 barf("evacuate: THUNK_SELECTOR: strange selectee");
1147 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1153 /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;
1157 /* ToDo: optimise STATIC_LINK for known cases.
1158 - FUN_STATIC : payload[0]
1159 - THUNK_STATIC : payload[1]
1160 - IND_STATIC : payload[1]
1164 if (info->srt_len == 0) { /* small optimisation */
1170 /* don't want to evacuate these, but we do want to follow pointers
1171 * from SRTs - see scavenge_static.
1174 /* put the object on the static list, if necessary.
1176 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1177 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1178 static_objects = (StgClosure *)q;
1182 case CONSTR_INTLIKE:
1183 case CONSTR_CHARLIKE:
1184 case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
1200 /* shouldn't see these */
1201 barf("evacuate: stack frame\n");
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
1207 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1212 /* Already evacuated, just return the forwarding address.
1213 * HOWEVER: if the requested destination generation (evac_gen) is
1214 * older than the actual generation (because the object was
1215 * already evacuated to a younger generation) then we have to
1216 * set the failed_to_evac flag to indicate that we couldn't
1217 * manage to promote the object to the desired generation.
1219 if (evac_gen > 0) { /* optimisation */
1220 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1221 if (Bdescr((P_)p)->gen->no < evac_gen) {
1222 /* fprintf(stderr,"evac failed!\n");*/
1223 failed_to_evac = rtsTrue;
1226 return ((StgEvacuated*)q)->evacuee;
1231 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1233 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1234 evacuate_large((P_)q, rtsFalse);
1237 /* just copy the block */
1238 to = copy(q,size,bd);
1245 case MUT_ARR_PTRS_FROZEN:
1247 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1249 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1250 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1253 /* just copy the block */
1254 to = copy(q,size,bd);
1256 if (info->type == MUT_ARR_PTRS) {
1257 evacuate_mutable((StgMutClosure *)to);
1265 StgTSO *tso = stgCast(StgTSO *,q);
1266 nat size = tso_sizeW(tso);
1269 /* Large TSOs don't get moved, so no relocation is required.
1271 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1272 evacuate_large((P_)q, rtsTrue);
1275 /* To evacuate a small TSO, we need to relocate the update frame
1279 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1281 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1283 /* relocate the stack pointers... */
1284 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1285 new_tso->sp = (StgPtr)new_tso->sp + diff;
1286 new_tso->splim = (StgPtr)new_tso->splim + diff;
1288 relocate_TSO(tso, new_tso);
1289 upd_evacuee(q,(StgClosure *)new_tso);
1291 evacuate_mutable((StgMutClosure *)new_tso);
1292 return (StgClosure *)new_tso;
1298 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1302 barf("evacuate: strange closure type");
1308 /* -----------------------------------------------------------------------------
1309 relocate_TSO is called just after a TSO has been copied from src to
1310 dest. It adjusts the update frame list for the new location.
1311 -------------------------------------------------------------------------- */
1314 relocate_TSO(StgTSO *src, StgTSO *dest)
1321 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1325 while ((P_)su < dest->stack + dest->stack_size) {
1326 switch (get_itbl(su)->type) {
1328 /* GCC actually manages to common up these three cases! */
1331 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1336 cf = (StgCatchFrame *)su;
1337 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1342 sf = (StgSeqFrame *)su;
1343 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1352 barf("relocate_TSO");
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;
1365 /* evacuate the SRT. If srt_len is zero, then there isn't an
1366 * srt field in the info table. That's ok, because we'll
1367 * never dereference it.
1369 srt = stgCast(StgClosure **,info->srt);
1370 srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}
1376 /* -----------------------------------------------------------------------------
1377 Scavenge a given step until there are no more objects in this step
1380 evac_gen is set by the caller to be either zero (for a step in a
1381 generation < N) or G where G is the generation of the step being
1384 We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
1387 -------------------------------------------------------------------------- */
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
1396 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
  p  = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;
1403 /* scavenge phase - standard breadth-first scavenging of the
1407 while (bd != step->hp_bd || p < step->hp) {
1409 /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p  = bd->start;
      continue;
    }
1416 q = p; /* save ptr to object */
1418 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1419 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1421 info = get_itbl((StgClosure *)p);
1422 switch (info -> type) {
1426 StgBCO* bco = stgCast(StgBCO*,p);
1428 for (i = 0; i < bco->n_ptrs; i++) {
1429 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1431 p += bco_sizeW(bco);
1436 /* treat MVars specially, because we don't want to evacuate the
1437 * mut_link field in the middle of the closure.
1440 StgMVar *mvar = ((StgMVar *)p);
1442 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1443 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1444 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1445 p += sizeofW(StgMVar);
1446 evac_gen = saved_evac_gen;
1459 case IND_OLDGEN_PERM:
1465 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1466 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1467 (StgClosure *)*p = evacuate((StgClosure *)*p);
1469 p += info->layout.payload.nptrs;
1474 /* ignore MUT_CONSs */
1475 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1477 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1478 evac_gen = saved_evac_gen;
1480 p += sizeofW(StgMutVar);
1485 p += BLACKHOLE_sizeW();
1490 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1491 (StgClosure *)bh->blocking_queue =
1492 evacuate((StgClosure *)bh->blocking_queue);
1493 if (failed_to_evac) {
1494 failed_to_evac = rtsFalse;
1495 evacuate_mutable((StgMutClosure *)bh);
1497 p += BLACKHOLE_sizeW();
1501 case THUNK_SELECTOR:
1503 StgSelector *s = (StgSelector *)p;
1504 s->selectee = evacuate(s->selectee);
1505 p += THUNK_SELECTOR_sizeW();
1511 barf("scavenge:IND???\n");
1513 case CONSTR_INTLIKE:
1514 case CONSTR_CHARLIKE:
1516 case CONSTR_NOCAF_STATIC:
1520 /* Shouldn't see a static object here. */
1521 barf("scavenge: STATIC object\n");
1533 /* Shouldn't see stack frames here. */
1534 barf("scavenge: stack frame\n");
1536 case AP_UPD: /* same as PAPs */
1538 /* Treat a PAP just like a section of stack, not forgetting to
1539 * evacuate the function pointer too...
1542 StgPAP* pap = stgCast(StgPAP*,p);
1544 pap->fun = evacuate(pap->fun);
1545 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1546 p += pap_sizeW(pap);
1552 /* nothing to follow */
1553 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1557 /* follow everything */
1561 evac_gen = 0; /* repeatedly mutable */
1562 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1563 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1564 (StgClosure *)*p = evacuate((StgClosure *)*p);
1566 evac_gen = saved_evac_gen;
1570 case MUT_ARR_PTRS_FROZEN:
1571 /* follow everything */
1573 StgPtr start = p, next;
1575 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1576 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1577 (StgClosure *)*p = evacuate((StgClosure *)*p);
1579 if (failed_to_evac) {
1580 /* we can do this easier... */
1581 evacuate_mutable((StgMutClosure *)start);
1582 failed_to_evac = rtsFalse;
1593 /* chase the link field for any TSOs on the same queue */
1594 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1595 /* scavenge this thread's stack */
1596 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1597 evac_gen = saved_evac_gen;
1598 p += tso_sizeW(tso);
1605 barf("scavenge: unimplemented/strange closure type\n");
1611 /* If we didn't manage to promote all the objects pointed to by
1612 * the current object, then we have to designate this object as
1613 * mutable (because it contains old-to-new generation pointers).
1615 if (failed_to_evac) {
1616 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1617 failed_to_evac = rtsFalse;
1625 /* -----------------------------------------------------------------------------
1626 Scavenge one object.
1628 This is used for objects that are temporarily marked as mutable
1629 because they contain old-to-new generation pointers. Only certain
1630 objects can have this property.
1631 -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
  StgPtr end;
  const StgInfoTable *info;
  rtsBool no_luck;
1638 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1639 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1641 info = get_itbl((StgClosure *)p);
1643 switch (info -> type) {
1651 case IND_OLDGEN_PERM:
1657 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1658 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1659 (StgClosure *)*p = evacuate((StgClosure *)*p);
1668 case THUNK_SELECTOR:
1670 StgSelector *s = (StgSelector *)p;
1671 s->selectee = evacuate(s->selectee);
1675 case AP_UPD: /* same as PAPs */
1677 /* Treat a PAP just like a section of stack, not forgetting to
1678 * evacuate the function pointer too...
1681 StgPAP* pap = stgCast(StgPAP*,p);
1683 pap->fun = evacuate(pap->fun);
1684 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1689 /* This might happen if for instance a MUT_CONS was pointing to a
1690 * THUNK which has since been updated. The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
1697 barf("scavenge_one: strange object");
  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
1706 /* -----------------------------------------------------------------------------
1707 Scavenging mutable lists.
1709 We treat the mutable list of each generation > N (i.e. all the
1710 generations older than the one being collected) as roots. We also
1711 remove non-mutable objects from the mutable list at this point.
1712 -------------------------------------------------------------------------- */
1714 static StgMutClosure *
scavenge_mutable_list(StgMutClosure *p, nat gen)
{
1718 StgMutClosure *start;
1719 StgMutClosure **prev;
  evac_gen = gen;

  start = p;
  prev  = &start;

  failed_to_evac = rtsFalse;
1728 for (; p != END_MUT_LIST; p = *prev) {
1730 /* make sure the info pointer is into text space */
1731 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1732 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1735 switch(info->type) {
1737 case MUT_ARR_PTRS_FROZEN:
1738 /* remove this guy from the mutable list, but follow the ptrs
1739 * anyway (and make sure they get promoted to this gen).
1744 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1746 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1747 (StgClosure *)*q = evacuate((StgClosure *)*q);
1751 if (failed_to_evac) {
1752 failed_to_evac = rtsFalse;
1753 prev = &p->mut_link;
1755 *prev = p->mut_link;
1761 /* follow everything */
1762 prev = &p->mut_link;
1766 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1767 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1768 (StgClosure *)*q = evacuate((StgClosure *)*q);
1774 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1775 * it from the mutable list if possible by promoting whatever it
1778 if (p->header.info == &MUT_CONS_info) {
1780 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1781 /* didn't manage to promote everything, so leave the
1782 * MUT_CONS on the list.
1784 prev = &p->mut_link;
1786 *prev = p->mut_link;
1790 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1791 prev = &p->mut_link;
1797 StgMVar *mvar = (StgMVar *)p;
1798 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1799 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1800 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1801 prev = &p->mut_link;
1806 /* follow ptrs and remove this from the mutable list */
1808 StgTSO *tso = (StgTSO *)p;
1810 /* Don't bother scavenging if this thread is dead
1812 if (!(tso->whatNext == ThreadComplete ||
1813 tso->whatNext == ThreadKilled)) {
1814 /* Don't need to chase the link field for any TSOs on the
1815 * same queue. Just scavenge this thread's stack
1817 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1820 /* Don't take this TSO off the mutable list - it might still
       * point to some younger objects (because we set evac_gen to 0
       * above).
       */
1824 prev = &tso->mut_link;
1829 case IND_OLDGEN_PERM:
1831 /* Try to pull the indirectee into this generation, so we can
1832 * remove the indirection from the mutable list.
1835 ((StgIndOldGen *)p)->indirectee =
1836 evacuate(((StgIndOldGen *)p)->indirectee);
1839 if (failed_to_evac) {
1840 failed_to_evac = rtsFalse;
1841 prev = &p->mut_link;
1843 *prev = p->mut_link;
1844 /* the mut_link field of an IND_STATIC is overloaded as the
1845 * static link field too (it just so happens that we don't need
1846 * both at the same time), so we need to NULL it out when
1847 * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * GC.
         */
1857 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1858 (StgClosure *)bh->blocking_queue =
1859 evacuate((StgClosure *)bh->blocking_queue);
1860 prev = &p->mut_link;
1865 /* shouldn't have anything else on the mutables list */
1866 barf("scavenge_mutable_object: non-mutable object?");
static void
scavenge_static(void)
{
1875 StgClosure* p = static_objects;
1876 const StgInfoTable *info;
1878 /* Always evacuate straight to the oldest generation for static
1880 evac_gen = oldest_gen->no;
1882 /* keep going until we've scavenged all the objects on the linked
1884 while (p != END_OF_STATIC_LIST) {
1888 /* make sure the info pointer is into text space */
1889 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1890 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1892 /* Take this object *off* the static_objects list,
1893 * and put it on the scavenged_static_objects list.
1895 static_objects = STATIC_LINK(info,p);
1896 STATIC_LINK(info,p) = scavenged_static_objects;
1897 scavenged_static_objects = p;
1899 switch (info -> type) {
1903 StgInd *ind = (StgInd *)p;
1904 ind->indirectee = evacuate(ind->indirectee);
1906 /* might fail to evacuate it, in which case we have to pop it
1907 * back on the mutable list (and take it off the
1908 * scavenged_static list because the static link and mut link
1909 * pointers are one and the same).
1911 if (failed_to_evac) {
1912 failed_to_evac = rtsFalse;
1913 scavenged_static_objects = STATIC_LINK(info,p);
1914 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1915 oldest_gen->mut_list = (StgMutClosure *)ind;
1929 next = (P_)p->payload + info->layout.payload.ptrs;
1930 /* evacuate the pointers */
1931 for (q = (P_)p->payload; q < next; q++) {
1932 (StgClosure *)*q = evacuate((StgClosure *)*q);
1938 barf("scavenge_static");
1941 ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
1944 * be more stuff on this list now that we've done some evacuating!
1945 * (static_objects is a global)
1951 /* -----------------------------------------------------------------------------
1952 scavenge_stack walks over a section of stack and evacuates all the
1953 objects pointed to by it. We can use the same code for walking
1954 PAPs, since these are just sections of copied stack.
1955 -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;
1965 * Each time around this loop, we are looking at a chunk of stack
1966 * that starts with either a pending argument section or an
1967 * activation record.
1970 while (p < stack_end) {
1971 q = *stgCast(StgPtr*,p);
1973 /* If we've got a tag, skip over that many words on the stack */
1974 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1979 /* Is q a pointer to a closure?
1981 if (! LOOKS_LIKE_GHC_INFO(q)) {
1984 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1985 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1987 /* otherwise, must be a pointer into the allocation space.
1991 (StgClosure *)*p = evacuate((StgClosure *)q);
1997 * Otherwise, q must be the info pointer of an activation
1998 * record. All activation records have 'bitmap' style layout
2001 info = get_itbl(stgCast(StgClosure*,p));
2003 switch (info->type) {
2005 /* Dynamic bitmap: the mask is stored on the stack */
2007 bitmap = stgCast(StgRetDyn*,p)->liveness;
2008 p = &payloadWord(stgCast(StgRetDyn*,p),0);
2011 /* probably a slow-entry point return address: */
2017 /* Specialised code for update frames, since they're so common.
2018 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2019 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2023 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2025 StgClosureType type = get_itbl(frame->updatee)->type;
2027 p += sizeofW(StgUpdateFrame);
2028 if (type == EVACUATED) {
2029 frame->updatee = evacuate(frame->updatee);
2032 bdescr *bd = Bdescr((P_)frame->updatee);
2033 if (bd->gen->no > N) {
2034 if (bd->gen->no < evac_gen) {
2035 failed_to_evac = rtsTrue;
2042 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2043 sizeofW(StgHeader), bd);
2044 upd_evacuee(frame->updatee,to);
2045 frame->updatee = to;
2048 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
2049 upd_evacuee(frame->updatee,to);
2050 frame->updatee = to;
2051 evacuate_mutable((StgMutClosure *)to);
2054 barf("scavenge_stack: UPDATE_FRAME updatee");
2059 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2066 bitmap = info->layout.bitmap;
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }
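      /* Example (illustrative): bitmap 0x2 over two stack words means
       * word 0 has a 0 bit (a pointer, so it gets evacuated) while
       * word 1 has a 1 bit (a non-pointer, so it is skipped).
       */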
2081 /* large bitmap (> 32 entries) */
2086 StgLargeBitmap *large_bitmap;
2089 large_bitmap = info->layout.large_bitmap;
2092 for (i=0; i<large_bitmap->size; i++) {
2093 bitmap = large_bitmap->bitmap[i];
2094 q = p + sizeof(W_) * 8;
        while (bitmap != 0) {
          if ((bitmap & 1) == 0) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);
          }
          p++;
          bitmap = bitmap >> 1;
        }
2102 if (i+1 < large_bitmap->size) {
2104 (StgClosure *)*p = evacuate((StgClosure *)*p);
2110 /* and don't forget to follow the SRT */
2115 barf("scavenge_stack: weird activation record found on stack.\n");
2120 /*-----------------------------------------------------------------------------
2121 scavenge the large object list.
2123 evac_gen set by caller; similar games played with evac_gen as with
2124 scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
2127 --------------------------------------------------------------------------- */
static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
2135 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2137 evac_gen = 0; /* most objects are mutable */
2138 bd = step->new_large_objects;
2140 for (; bd != NULL; bd = step->new_large_objects) {
2142 /* take this object *off* the large objects list and put it on
2143 * the scavenged large objects list. This is so that we can
2144 * treat new_large_objects as a stack and push new objects on
2145 * the front when evacuating.
2147 step->new_large_objects = bd->link;
2148 dbl_link_onto(bd, &step->scavenged_large_objects);
    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));
2153 switch (info->type) {
2155 /* only certain objects can be "large"... */
2159 /* nothing to follow */
2163 /* follow everything */
2167 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2168 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2169 (StgClosure *)*p = evacuate((StgClosure *)*p);
2174 case MUT_ARR_PTRS_FROZEN:
2175 /* follow everything */
2177 StgPtr start = p, next;
2179 evac_gen = saved_evac_gen; /* not really mutable */
2180 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2181 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2182 (StgClosure *)*p = evacuate((StgClosure *)*p);
2185 if (failed_to_evac) {
2186 evacuate_mutable((StgMutClosure *)start);
2193 StgBCO* bco = stgCast(StgBCO*,p);
2195 evac_gen = saved_evac_gen;
2196 for (i = 0; i < bco->n_ptrs; i++) {
2197 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2208 /* chase the link field for any TSOs on the same queue */
2209 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2210 /* scavenge this thread's stack */
2211 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2216 barf("scavenge_large: unknown/strange object");
static void
zeroStaticObjectList(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;
2228 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
2235 /* This function is only needed because we share the mutable link
2236 * field with the static link field in an IND_STATIC, so we have to
2237 * zero the mut_link field before doing a major GC, which needs the
2238 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zeroMutableList(StgMutClosure *first)
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
2258 void RevertCAFs(void)
2260 while (enteredCAFs != END_CAF_LIST) {
2261 StgCAF* caf = enteredCAFs;
2263 enteredCAFs = caf->link;
2264 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2265 SET_INFO(caf,&CAF_UNENTERED_info);
2266 caf->value = stgCast(StgClosure*,0xdeadbeef);
2267 caf->link = stgCast(StgCAF*,0xdeadbeef);
2271 void revertDeadCAFs(void)
2273 StgCAF* caf = enteredCAFs;
2274 enteredCAFs = END_CAF_LIST;
2275 while (caf != END_CAF_LIST) {
2276 StgCAF* next = caf->link;
2278 switch(GET_INFO(caf)->type) {
2281 /* This object has been evacuated, it must be live. */
2282 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
        new->link = enteredCAFs;
        enteredCAFs = new;
2289 SET_INFO(caf,&CAF_UNENTERED_info);
2290 caf->value = stgCast(StgClosure*,0xdeadbeef);
2291 caf->link = stgCast(StgCAF*,0xdeadbeef);
2295 barf("revertDeadCAFs: enteredCAFs list corrupted");
2301 /* -----------------------------------------------------------------------------
2302 Sanity code for CAF garbage collection.
2304 With DEBUG turned on, we manage a CAF list in addition to the SRT
2305 mechanism. After GC, we run down the CAF list and blackhole any
2306 CAFs which have been garbage collected. This means we get an error
2307 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
2311 -------------------------------------------------------------------------- */
2319 const StgInfoTable *info;
2330 ASSERT(info->type == IND_STATIC);
2332 if (STATIC_LINK(info,p) == NULL) {
2333 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
2335 SET_INFO(p,&BLACKHOLE_info);
2336 p = STATIC_LINK2(info,p);
2340 pp = &STATIC_LINK2(info,p);
2347 /* fprintf(stderr, "%d CAFs live\n", i); */
/* -----------------------------------------------------------------------------
   Lazy black holing

   Whenever a thread returns to the scheduler after possibly doing
2355 some work, we have to run down the stack and black-hole all the
2356 closures referred to by update frames.
2357 -------------------------------------------------------------------------- */
void
threadLazyBlackHole(StgTSO *tso)
{
2362 StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
2367 update_frame = tso->su;
2370 switch (get_itbl(update_frame)->type) {
2373 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2377 bh = (StgBlockingQueue *)update_frame->updatee;
2379 /* if the thunk is already blackholed, it means we've also
2380 * already blackholed the rest of the thunks on this stack,
2381 * so we can stop early.
2384 /* Don't for now: when we enter a CAF, we create a black hole on
2385 * the heap and make the update frame point to it. Thus the
2386 * above optimisation doesn't apply.
2388 if (bh->header.info != &BLACKHOLE_info
2389 && bh->header.info != &BLACKHOLE_BQ_info
2390 && bh->header.info != &CAF_BLACKHOLE_info) {
2391 SET_INFO(bh,&BLACKHOLE_info);
2394 update_frame = update_frame->link;
2398 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2404 barf("threadPaused");
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
2412 * Code largely pinched from old RTS, then hacked to bits. We also do
2413 * lazy black holing here.
2415 * -------------------------------------------------------------------------- */
void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
2421 StgUpdateFrame *frame;
2422 StgUpdateFrame *next_frame; /* Temporally next */
2423 StgUpdateFrame *prev_frame; /* Temporally previous */
  rtsBool prev_was_update_frame;
  StgPtr bottom, sp;

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;
2430 /* There must be at least one frame, namely the STOP_FRAME.
2432 ASSERT((P_)frame < bottom);
2434 /* Walk down the stack, reversing the links between frames so that
2435 * we can walk back up as we squeeze from the bottom. Note that
2436 * next_frame and prev_frame refer to next and previous as they were
2437 * added to the stack, rather than the way we see them in this
2438 * walk. (It makes the next loop less confusing.)
2440 * Could stop if we find an update frame pointing to a black hole,
2441 * but see comment in threadLazyBlackHole().
  next_frame = NULL;
  while ((P_)frame < bottom - 1) {  /* bottom - 1 is the STOP_FRAME */
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
  }
2452 /* Now, we're at the bottom. Frame points to the lowest update
2453 * frame on the stack, and its link actually points to the frame
2454 * above. We have to walk back up the stack, squeezing out empty
2455 * update frames and turning the pointers back around on the way
2458 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2459 * we never want to eliminate it anyway. Just walk one step up
2460 * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved as well.
   */
2468 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2471 * Loop through all of the frames (everything except the very
2472 * bottom). Things are complicated by the fact that we have
2473 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2474 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
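   *
   * Example (illustrative): when two adjacent UPDATE_FRAMEs are found,
   * the current frame is squeezed out; UPD_IND() turns its updatee into
   * an indirection to the kept frame's updatee, displacement grows by
   * sizeofW(StgUpdateFrame), and the words below are slid up afterwards.
   */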
2476 while (frame != NULL) {
2478 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2479 rtsBool is_update_frame;
2481 next_frame = frame->link;
2482 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2485 * 1. both the previous and current frame are update frames
2486 * 2. the current frame is empty
2488 if (prev_was_update_frame && is_update_frame &&
2489 (P_)prev_frame == frame_bottom + displacement) {
2491 /* Now squeeze out the current frame */
2492 StgClosure *updatee_keep = prev_frame->updatee;
2493 StgClosure *updatee_bypass = frame->updatee;
2496 fprintf(stderr, "squeezing frame at %p\n", frame);
2499 /* Deal with blocking queues. If both updatees have blocked
2500 * threads, then we should merge the queues into the update
2501 * frame that we're keeping.
2503 * Alternatively, we could just wake them up: they'll just go
2504 * straight to sleep on the proper blackhole! This is less code
2505 * and probably less bug prone, although it's probably much
2508 #if 0 /* do it properly... */
2509 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2510 /* Sigh. It has one. Don't lose those threads! */
2511 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2512 /* Urgh. Two queues. Merge them. */
2513 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2515 while (keep_tso->link != END_TSO_QUEUE) {
2516 keep_tso = keep_tso->link;
2518 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2521 /* For simplicity, just swap the BQ for the BH */
2522 P_ temp = updatee_keep;
2524 updatee_keep = updatee_bypass;
2525 updatee_bypass = temp;
2527 /* Record the swap in the kept frame (below) */
2528 prev_frame->updatee = updatee_keep;
2533 TICK_UPD_SQUEEZED();
2534 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2536 sp = (P_)frame - 1; /* sp = stuff to slide */
2537 displacement += sizeofW(StgUpdateFrame);
2540 /* No squeeze for this frame */
2541 sp = frame_bottom - 1; /* Keep the current frame */
2543 /* Do lazy black-holing.
2545 if (is_update_frame) {
2546 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
2547 if (bh->header.info != &BLACKHOLE_info
2548 && bh->header.info != &BLACKHOLE_BQ_info
2549 && bh->header.info != &CAF_BLACKHOLE_info
2551 SET_INFO(bh,&BLACKHOLE_info);
2555 /* Fix the link in the current frame (should point to the frame below) */
2556 frame->link = prev_frame;
2557 prev_was_update_frame = is_update_frame;
2560 /* Now slide all words from sp up to the next frame */
2562 if (displacement > 0) {
2563 P_ next_frame_bottom;
2565 if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
2580 (P_)prev_frame = (P_)frame + displacement;
2584 tso->sp += displacement;
2585 tso->su = prev_frame;
/* -----------------------------------------------------------------------------
 * threadPaused
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
2594 * -------------------------------------------------------------------------- */
void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}