1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.14 1999/01/19 15:41:56 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
85 static rtsBool failed_to_evac;
87 /* -----------------------------------------------------------------------------
88 Static function declarations
89 -------------------------------------------------------------------------- */
91 static StgClosure *evacuate(StgClosure *q);
92 static void zeroStaticObjectList(StgClosure* first_static);
93 static rtsBool traverse_weak_ptr_list(void);
94 static void zeroMutableList(StgMutClosure *first);
95 static void revertDeadCAFs(void);
97 static void scavenge_stack(StgPtr p, StgPtr stack_end);
98 static void scavenge_large(step *step);
99 static void scavenge(step *step);
100 static void scavenge_static(void);
101 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
104 static void gcCAFs(void);
107 /* -----------------------------------------------------------------------------
110 For garbage collecting generation N (and all younger generations):
112 - follow all pointers in the root set. the root set includes all
113 mutable objects in all steps in all generations.
115 - for each pointer, evacuate the object it points to into either
116 + to-space in the next higher step in that generation, if one exists,
117 + if the object's generation == N, then evacuate it to the next
118 generation if one exists, or else to-space in the current
120 + if the object's generation < N, then evacuate it to to-space
121 in the next generation.
123 - repeatedly scavenge to-space from each step in each generation
124 being collected until no more objects can be evacuated.
126 - free from-space in each step, and set from-space = to-space.
128 -------------------------------------------------------------------------- */
130 void GarbageCollect(void (*get_roots)(void))
134 lnat live, allocated, collected = 0;
138 CostCentreStack *prev_CCS;
141 /* tell the stats department that we've started a GC */
144 /* attribute any costs to CCS_GC */
150 /* We might have been called from Haskell land by _ccall_GC, in
151 * which case we need to call threadPaused() because the scheduler
152 * won't have done it.
154 if (CurrentTSO) { threadPaused(CurrentTSO); }
156 /* Approximate how much we allocated: number of blocks in the
157 * nursery + blocks allocated via allocate() - unused nusery blocks.
158 * This leaves a little slop at the end of each block, and doesn't
159 * take into account large objects (ToDo).
161 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
162 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
163 allocated -= BLOCK_SIZE_W;
166 /* Figure out which generation to collect
168 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
169 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
173 major_gc = (N == RtsFlags.GcFlags.generations-1);
175 /* check stack sanity *before* GC (ToDo: check all threads) */
176 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
177 IF_DEBUG(sanity, checkFreeListSanity());
179 /* Initialise the static object lists
181 static_objects = END_OF_STATIC_LIST;
182 scavenged_static_objects = END_OF_STATIC_LIST;
184 /* zero the mutable list for the oldest generation (see comment by
185 * zeroMutableList below).
188 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
191 /* Initialise to-space in all the generations/steps that we're
194 for (g = 0; g <= N; g++) {
195 generations[g].mut_list = END_MUT_LIST;
197 for (s = 0; s < generations[g].n_steps; s++) {
198 /* generation 0, step 0 doesn't need to-space */
199 if (g == 0 && s == 0) { continue; }
200 /* Get a free block for to-space. Extra blocks will be chained on
204 step = &generations[g].steps[s];
205 ASSERT(step->gen->no == g);
206 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
207 bd->gen = &generations[g];
210 bd->evacuated = 1; /* it's a to-space block */
211 step->hp = bd->start;
212 step->hpLim = step->hp + BLOCK_SIZE_W;
215 step->to_blocks = 1; /* ???? */
216 step->scan = bd->start;
218 step->new_large_objects = NULL;
219 step->scavenged_large_objects = NULL;
220 /* mark the large objects as not evacuated yet */
221 for (bd = step->large_objects; bd; bd = bd->link) {
227 /* make sure the older generations have at least one block to
228 * allocate into (this makes things easier for copy(), see below.
230 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
231 for (s = 0; s < generations[g].n_steps; s++) {
232 step = &generations[g].steps[s];
233 if (step->hp_bd == NULL) {
235 bd->gen = &generations[g];
238 bd->evacuated = 0; /* *not* a to-space block */
239 step->hp = bd->start;
240 step->hpLim = step->hp + BLOCK_SIZE_W;
245 /* Set the scan pointer for older generations: remember we
246 * still have to scavenge objects that have been promoted. */
247 step->scan = step->hp;
248 step->scan_bd = step->hp_bd;
249 step->to_space = NULL;
251 step->new_large_objects = NULL;
252 step->scavenged_large_objects = NULL;
256 /* -----------------------------------------------------------------------
257 * follow all the roots that we know about:
258 * - mutable lists from each generation > N
259 * we want to *scavenge* these roots, not evacuate them: they're not
260 * going to move in this GC.
261 * Also: do them in reverse generation order. This is because we
262 * often want to promote objects that are pointed to by older
263 * generations early, so we don't have to repeatedly copy them.
264 * Doing the generations in reverse order ensures that we don't end
265 * up in the situation where we want to evac an object to gen 3 and
266 * it has already been evaced to gen 2.
269 StgMutClosure *tmp, **pp;
270 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
271 generations[g].saved_mut_list = generations[g].mut_list;
272 generations[g].mut_list = END_MUT_LIST;
275 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
276 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
277 pp = &generations[g].mut_list;
278 while (*pp != END_MUT_LIST) {
279 pp = &(*pp)->mut_link;
285 /* follow all the roots that the application knows about.
290 /* And don't forget to mark the TSO if we got here direct from
293 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
296 /* Mark the weak pointer list, and prepare to detect dead weak
300 old_weak_ptr_list = weak_ptr_list;
301 weak_ptr_list = NULL;
302 weak_done = rtsFalse;
306 /* ToDo: To fix the caf leak, we need to make the commented out
307 * parts of this code do something sensible - as described in
310 extern void markHugsObjects(void);
312 /* ToDo: This (undefined) function should contain the scavenge
313 * loop immediately below this block of code - but I'm not sure
314 * enough of the details to do this myself.
316 scavengeEverything();
317 /* revert dead CAFs and update enteredCAFs list */
322 /* This will keep the CAFs and the attached BCOs alive
323 * but the values will have been reverted
325 scavengeEverything();
330 /* -------------------------------------------------------------------------
331 * Repeatedly scavenge all the areas we know about until there's no
332 * more scavenging to be done.
339 /* scavenge static objects */
340 if (major_gc && static_objects != END_OF_STATIC_LIST) {
344 /* When scavenging the older generations: Objects may have been
345 * evacuated from generations <= N into older generations, and we
346 * need to scavenge these objects. We're going to try to ensure that
347 * any evacuations that occur move the objects into at least the
348 * same generation as the object being scavenged, otherwise we
349 * have to create new entries on the mutable list for the older
353 /* scavenge each step in generations 0..maxgen */
356 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
357 for (s = 0; s < generations[gen].n_steps; s++) {
358 step = &generations[gen].steps[s];
360 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
364 if (step->new_large_objects != NULL) {
365 scavenge_large(step);
371 if (flag) { goto loop; }
373 /* must be last... */
374 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
379 /* Set the maximum blocks for the oldest generation, based on twice
380 * the amount of live data now, adjusted to fit the maximum heap
383 * This is an approximation, since in the worst case we'll need
384 * twice the amount of live data plus whatever space the other
388 oldest_gen->max_blocks =
389 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
390 RtsFlags.GcFlags.minOldGenSize);
391 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
392 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
393 if (((int)oldest_gen->max_blocks - (int)oldest_gen->steps[0].to_blocks) <
394 (RtsFlags.GcFlags.pcFreeHeap *
395 RtsFlags.GcFlags.maxHeapSize / 200)) {
401 /* run through all the generations/steps and tidy up
403 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
406 generations[g].collections++; /* for stats */
409 for (s = 0; s < generations[g].n_steps; s++) {
411 step = &generations[g].steps[s];
413 if (!(g == 0 && s == 0)) {
414 /* Tidy the end of the to-space chains */
415 step->hp_bd->free = step->hp;
416 step->hp_bd->link = NULL;
419 /* for generations we collected... */
422 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
424 /* free old memory and shift to-space into from-space for all
425 * the collected steps (except the allocation area). These
426 * freed blocks will probaby be quickly recycled.
428 if (!(g == 0 && s == 0)) {
429 freeChain(step->blocks);
430 step->blocks = step->to_space;
431 step->n_blocks = step->to_blocks;
432 step->to_space = NULL;
434 for (bd = step->blocks; bd != NULL; bd = bd->link) {
435 bd->evacuated = 0; /* now from-space */
439 /* LARGE OBJECTS. The current live large objects are chained on
440 * scavenged_large, having been moved during garbage
441 * collection from large_objects. Any objects left on
442 * large_objects list are therefore dead, so we free them here.
444 for (bd = step->large_objects; bd != NULL; bd = next) {
449 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
452 step->large_objects = step->scavenged_large_objects;
454 /* Set the maximum blocks for this generation, interpolating
455 * between the maximum size of the oldest and youngest
458 * max_blocks = oldgen_max_blocks * G
459 * -----------------------
463 generations[g].max_blocks =
464 stg_max(RtsFlags.GcFlags.minOldGenSize,
465 (oldest_gen->max_blocks * g) /
466 (RtsFlags.GcFlags.generations-1));
469 /* for older generations... */
472 /* For older generations, we need to append the
473 * scavenged_large_object list (i.e. large objects that have been
474 * promoted during this GC) to the large_object list for that step.
476 for (bd = step->scavenged_large_objects; bd; bd = next) {
479 dbl_link_onto(bd, &step->large_objects);
482 /* add the new blocks we promoted during this GC */
483 step->n_blocks += step->to_blocks;
488 /* revert dead CAFs and update enteredCAFs list */
491 /* mark the garbage collected CAFs as dead */
493 if (major_gc) { gcCAFs(); }
496 /* zero the scavenged static object list */
498 zeroStaticObjectList(scavenged_static_objects);
503 for (bd = g0s0->blocks; bd; bd = bd->link) {
504 bd->free = bd->start;
505 ASSERT(bd->gen == g0);
506 ASSERT(bd->step == g0s0);
508 current_nursery = g0s0->blocks;
511 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
512 for (s = 0; s < generations[g].n_steps; s++) {
513 /* approximate amount of live data (doesn't take into account slop
514 * at end of each block). ToDo: this more accurately.
516 if (g == 0 && s == 0) { continue; }
517 step = &generations[g].steps[s];
518 live += step->n_blocks * BLOCK_SIZE_W +
519 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
523 /* Free the small objects allocated via allocate(), since this will
524 * all have been copied into G0S1 now.
526 if (small_alloc_list != NULL) {
527 freeChain(small_alloc_list);
529 small_alloc_list = NULL;
531 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
533 /* start any pending finalisers */
534 scheduleFinalisers(old_weak_ptr_list);
536 /* check sanity after GC */
538 for (g = 0; g <= N; g++) {
539 for (s = 0; s < generations[g].n_steps; s++) {
540 if (g == 0 && s == 0) { continue; }
541 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
542 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
545 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
546 for (s = 0; s < generations[g].n_steps; s++) {
547 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
548 generations[g].steps[s].blocks->start));
549 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
552 IF_DEBUG(sanity, checkFreeListSanity());
555 IF_DEBUG(gc, stat_describe_gens());
558 /* symbol-table based profiling */
559 /* heapCensus(to_space); */ /* ToDo */
562 /* restore enclosing cost centre */
567 /* check for memory leaks if sanity checking is on */
568 IF_DEBUG(sanity, memInventory());
570 /* ok, GC over: tell the stats department what happened. */
571 stat_endGC(allocated, collected, live, N);
574 /* -----------------------------------------------------------------------------
577 traverse_weak_ptr_list is called possibly many times during garbage
578 collection. It returns a flag indicating whether it did any work
579 (i.e. called evacuate on any live pointers).
581 Invariant: traverse_weak_ptr_list is called when the heap is in an
582 idempotent state. That means that there are no pending
583 evacuate/scavenge operations. This invariant helps the weak
584 pointer code decide which weak pointers are dead - if there are no
585 new live weak pointers, then all the currently unreachable ones are
588 For generational GC: we just don't try to finalise weak pointers in
589 older generations than the one we're collecting. This could
590 probably be optimised by keeping per-generation lists of weak
591 pointers, but for a few weak pointers this scheme will work.
592 -------------------------------------------------------------------------- */
/* Walk old_weak_ptr_list once, moving weak pointers whose keys are now
 * known to be alive onto the new weak_ptr_list (evacuating their value
 * and finaliser).  Returns rtsTrue iff any evacuation happened, so the
 * caller can iterate to a fixed point; once a pass does no work, the
 * entries left on old_weak_ptr_list are dead and become the
 * pending-finaliser list.
 */
595 traverse_weak_ptr_list(void)
597 StgWeak *w, **last_w, *next_w;
599 const StgInfoTable *info;
600 rtsBool flag = rtsFalse;
/* a previous pass already decided everything - nothing more to do */
602 if (weak_done) { return rtsFalse; }
604 /* doesn't matter where we evacuate values/finalisers to, since
605 * these pointers are treated as roots (iff the keys are alive).
609 last_w = &old_weak_ptr_list;
610 for (w = old_weak_ptr_list; w; w = next_w) {
/* NOTE(review): 'target' tracks the weak pointer's key (w->key is
 * re-read from it below) - its initialisation is not shown here. */
613 /* ignore weak pointers in older generations */
614 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
615 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
616 /* remove this weak ptr from the old_weak_ptr list */
618 /* and put it on the new weak ptr list */
620 w->link = weak_ptr_list;
626 info = get_itbl(target);
627 switch (info->type) {
632 case IND_OLDGEN: /* rely on compatible layout with StgInd */
633 case IND_OLDGEN_PERM:
634 /* follow indirections */
635 target = ((StgInd *)target)->indirectee;
639 /* If key is alive, evacuate value and finaliser and
640 * place weak ptr on new weak ptr list.
642 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
643 w->key = ((StgEvacuated *)target)->evacuee;
644 w->value = evacuate(w->value);
645 w->finaliser = evacuate(w->finaliser);
647 /* remove this weak ptr from the old_weak_ptr list */
650 /* and put it on the new weak ptr list */
652 w->link = weak_ptr_list;
657 default: /* key is dead */
664 /* If we didn't make any changes, then we can go round and kill all
665 * the dead weak pointers. The old_weak_ptr list is used as a list
666 * of pending finalisers later on.
668 if (flag == rtsFalse) {
669 for (w = old_weak_ptr_list; w; w = w->link) {
/* keep value and finaliser alive: the finaliser still needs to run */
670 w->value = evacuate(w->value);
671 w->finaliser = evacuate(w->finaliser);
/* Evacuate a single root closure and return its new address. */
680 MarkRoot(StgClosure *root)
682 root = evacuate(root);
/* Chain a freshly-allocated block onto the given step and point the
 * step's allocation pointers (hp/hpLim) at it.  Used when copy()/
 * copyPart() run out of room in the current to-space block.
 */
686 static inline void addBlock(step *step)
688 bdescr *bd = allocBlock();
/* steps in generations being collected get to-space blocks; the
 * branch body (elided here) presumably sets bd->evacuated accordingly
 * - confirm against the full source. */
692 if (step->gen->no <= N) {
/* close off the old block and link the new one after it */
698 step->hp_bd->free = step->hp;
699 step->hp_bd->link = bd;
700 step->hp = bd->start;
701 step->hpLim = step->hp + BLOCK_SIZE_W;
/* Copy a closure of 'size' words into the to-space of the destination
 * step, promoting to generation evac_gen's step 0 when the natural
 * destination is too young.  Returns the address of the new copy.
 */
706 static __inline__ StgClosure *
707 copy(StgClosure *src, nat size, bdescr *bd)
712 /* Find out where we're going, using the handy "to" pointer in
713 * the step of the source object. If it turns out we need to
714 * evacuate to an older generation, adjust it here (see comment
718 if (step->gen->no < evac_gen) {
719 step = &generations[evac_gen].steps[0];
722 /* chain a new block onto the to-space for the destination step if
725 if (step->hp + size >= step->hpLim) {
/* word-by-word copy of the closure into to-space */
731 for(to = dest, from = (P_)src; size>0; --size) {
734 return (StgClosure *)dest;
737 /* Special version of copy() for when we only want to copy the info
738 * pointer of an object, but reserve some padding after it. This is
739 * used to optimise evacuation of BLACKHOLEs.
/* Like copy(), but copies only 'size_to_copy' words of the source while
 * reserving 'size_to_reserve' words in to-space (the header comment
 * above notes this optimises evacuation of BLACKHOLEs, where the
 * payload after the header need not be copied).
 */
742 static __inline__ StgClosure *
743 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
/* promote to evac_gen's first step if the natural target is too young */
749 if (step->gen->no < evac_gen) {
750 step = &generations[evac_gen].steps[0];
753 if (step->hp + size_to_reserve >= step->hpLim) {
/* advance hp over the full reservation, not just the copied words */
758 step->hp += size_to_reserve;
759 for(to = dest, from = (P_)src; size_to_copy>0; --size_to_copy) {
763 return (StgClosure *)dest;
/* Overwrite the original closure p with an EVACUATED forwarding
 * closure.  NOTE(review): only the info-pointer store is visible here;
 * presumably q->evacuee is also set to dest (evacuate() reads that
 * field when it later meets the EVACUATED closure) - confirm.
 */
766 static __inline__ void
767 upd_evacuee(StgClosure *p, StgClosure *dest)
769 StgEvacuated *q = (StgEvacuated *)p;
771 SET_INFO(q,&EVACUATED_info);
775 /* -----------------------------------------------------------------------------
776 Evacuate a mutable object
778 If we evacuate a mutable object to an old generation, cons the
779 object onto the older generation's mutable list.
780 -------------------------------------------------------------------------- */
/* Record a mutable closure on its generation's mutable list, but only
 * when it now lives in an old generation (gen > 0); generation-0
 * mutables need no remembered-set entry.
 */
783 evacuate_mutable(StgMutClosure *c)
788 if (bd->gen->no > 0) {
/* cons c onto the front of the generation's mut_list */
789 c->mut_link = bd->gen->mut_list;
790 bd->gen->mut_list = c;
794 /* -----------------------------------------------------------------------------
795 Evacuate a large object
797 This just consists of removing the object from the (doubly-linked)
798 large_alloc_list, and linking it on to the (singly-linked)
799 new_large_objects list, from where it will be scavenged later.
801 Convention: bd->evacuated is /= 0 for a large object that has been
802 evacuated, or 0 otherwise.
803 -------------------------------------------------------------------------- */
/* Evacuate a large object: unlink its block from the (doubly-linked)
 * large_objects list and push it onto the destination step's
 * (singly-linked) new_large_objects list for later scavenging.  Large
 * objects are never copied.  'mutable' objects additionally go on the
 * destination generation's mutable list via evacuate_mutable().
 */
806 evacuate_large(StgPtr p, rtsBool mutable)
808 bdescr *bd = Bdescr(p);
811 /* should point to the beginning of the block */
812 ASSERT(((W_)p & BLOCK_MASK) == 0);
814 /* already evacuated? */
816 /* Don't forget to set the failed_to_evac flag if we didn't get
817 * the desired destination (see comments in evacuate()).
819 if (bd->gen->no < evac_gen) {
820 failed_to_evac = rtsTrue;
826 /* remove from large_object list */
828 bd->back->link = bd->link;
829 } else { /* first object in the list */
830 step->large_objects = bd->link;
/* patch the back-pointer of the successor, if any */
833 bd->link->back = bd->back;
836 /* link it on to the evacuated large object list of the destination step
/* promote to evac_gen's first step if the natural target is too young */
839 if (step->gen->no < evac_gen) {
840 step = &generations[evac_gen].steps[0];
845 bd->link = step->new_large_objects;
846 step->new_large_objects = bd;
850 evacuate_mutable((StgMutClosure *)p);
854 /* -----------------------------------------------------------------------------
855 Adding a MUT_CONS to an older generation.
857 This is necessary from time to time when we end up with an
858 old-to-new generation pointer in a non-mutable object. We defer
859 the promotion until the next GC.
860 -------------------------------------------------------------------------- */
/* Allocate a MUT_CONS cell in gen's first step wrapping 'ptr'.  Used
 * (per the comment block above) when an old-to-new generation pointer
 * turns up in a non-mutable object: the MUT_CONS goes on gen's mutable
 * list so the promotion is deferred until the next GC.  Returns the
 * new MUT_CONS closure.
 */
863 mkMutCons(StgClosure *ptr, generation *gen)
868 step = &gen->steps[0];
870 /* chain a new block onto the to-space for the destination step if
/* NOTE(review): the space check uses sizeofW(StgIndOldGen) while the
 * allocation below advances hp by sizeofW(StgMutVar) - confirm the
 * two sizes agree (or that StgIndOldGen is the larger). */
873 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
877 q = (StgMutVar *)step->hp;
878 step->hp += sizeofW(StgMutVar);
880 SET_HDR(q,&MUT_CONS_info,CCS_GC);
/* an old-generation mutable: put it on gen's mutable list */
882 evacuate_mutable((StgMutClosure *)q);
884 return (StgClosure *)q;
887 /* -----------------------------------------------------------------------------
890 This is called (eventually) for every live object in the system.
892 The caller to evacuate specifies a desired generation in the
893 evac_gen global variable. The following conditions apply to
894 evacuating an object which resides in generation M when we're
895 collecting up to generation N
899 else evac to step->to
901 if M < evac_gen evac to evac_gen, step 0
903 if the object is already evacuated, then we check which generation
906 if M >= evac_gen do nothing
907 if M < evac_gen set failed_to_evac flag to indicate that we
908 didn't manage to evacuate this object into evac_gen.
910 -------------------------------------------------------------------------- */
/* Evacuate a single closure q, returning its new address.  The desired
 * destination generation is passed in the evac_gen global (see the
 * comment block above): if we cannot get the object into evac_gen or
 * older, failed_to_evac is set instead, and the caller is expected to
 * compensate (typically via the mutable list).  Dispatches on closure
 * type: small objects are copied, large ones moved by evacuate_large,
 * indirections are followed, static objects are queued on
 * static_objects, and THUNK_SELECTORs may be evaluated in place.
 */
914 evacuate(StgClosure *q)
918 const StgInfoTable *info;
921 if (!LOOKS_LIKE_STATIC(q)) {
/* object lives in a generation we aren't collecting: leave it be */
923 if (bd->gen->no > N) {
924 /* Can't evacuate this object, because it's in a generation
925 * older than the ones we're collecting. Let's hope that it's
926 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
928 if (bd->gen->no < evac_gen) {
930 failed_to_evac = rtsTrue;
936 /* make sure the info pointer is into text space */
937 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
938 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
941 switch (info -> type) {
944 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
950 to = copy(q,sizeW_fromITBL(info),bd);
/* mutable object copied into an old gen: remember it */
952 evacuate_mutable((StgMutClosure *)to);
959 case IND_OLDGEN_PERM:
964 to = copy(q,sizeW_fromITBL(info),bd);
/* BLACKHOLE: only the header need be copied; pad the rest */
970 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
975 to = copy(q,BLACKHOLE_sizeW(),bd);
977 evacuate_mutable((StgMutClosure *)to);
/* THUNK_SELECTOR: try to perform the selection now, avoiding a
 * space leak by not copying the whole selectee */
982 const StgInfoTable* selectee_info;
983 StgClosure* selectee = ((StgSelector*)q)->selectee;
986 selectee_info = get_itbl(selectee);
987 switch (selectee_info->type) {
991 StgNat32 offset = info->layout.selector_offset;
993 /* check that the size is in range */
995 (StgNat32)(selectee_info->layout.payload.ptrs +
996 selectee_info->layout.payload.nptrs));
998 /* perform the selection! */
999 q = selectee->payload[offset];
1001 /* if we're already in to-space, there's no need to continue
1002 * with the evacuation, just update the source address with
1003 * a pointer to the (evacuated) constructor field.
1005 if (IS_USER_PTR(q)) {
1006 bdescr *bd = Bdescr((P_)q);
1007 if (bd->evacuated) {
1008 if (bd->gen->no < evac_gen) {
1009 failed_to_evac = rtsTrue;
1015 /* otherwise, carry on and evacuate this constructor field,
1016 * (but not the constructor itself)
/* keep chasing indirections to reach the selectee's value */
1025 case IND_OLDGEN_PERM:
1026 selectee = stgCast(StgInd *,selectee)->indirectee;
1030 selectee = stgCast(StgCAF *,selectee)->value;
1034 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1039 case THUNK_SELECTOR:
1040 /* aargh - do recursively???? */
1045 /* not evaluated yet */
1049 barf("evacuate: THUNK_SELECTOR: strange selectee");
/* selection couldn't be performed: copy the selector thunk itself */
1052 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1058 /* follow chains of indirections, don't evacuate them */
1059 q = ((StgInd*)q)->indirectee;
1062 /* ToDo: optimise STATIC_LINK for known cases.
1063 - FUN_STATIC : payload[0]
1064 - THUNK_STATIC : payload[1]
1065 - IND_STATIC : payload[1]
1069 if (info->srt_len == 0) { /* small optimisation */
1075 /* don't want to evacuate these, but we do want to follow pointers
1076 * from SRTs - see scavenge_static.
1079 /* put the object on the static list, if necessary.
1081 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1082 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1083 static_objects = (StgClosure *)q;
1087 case CONSTR_INTLIKE:
1088 case CONSTR_CHARLIKE:
1089 case CONSTR_NOCAF_STATIC:
1090 /* no need to put these on the static linked list, they don't need
1105 /* shouldn't see these */
1106 barf("evacuate: stack frame\n");
1110 /* these are special - the payload is a copy of a chunk of stack,
1112 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1117 /* Already evacuated, just return the forwarding address.
1118 * HOWEVER: if the requested destination generation (evac_gen) is
1119 * older than the actual generation (because the object was
1120 * already evacuated to a younger generation) then we have to
1121 * set the failed_to_evac flag to indicate that we couldn't
1122 * manage to promote the object to the desired generation.
1124 if (evac_gen > 0) { /* optimisation */
1125 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1126 if (Bdescr((P_)p)->gen->no < evac_gen) {
1127 /* fprintf(stderr,"evac failed!\n");*/
1128 failed_to_evac = rtsTrue;
1131 return ((StgEvacuated*)q)->evacuee;
1136 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1138 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1139 evacuate_large((P_)q, rtsFalse);
1142 /* just copy the block */
1143 to = copy(q,size,bd);
1150 case MUT_ARR_PTRS_FROZEN:
1152 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1154 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* only the still-mutable variant needs remembering */
1155 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1158 /* just copy the block */
1159 to = copy(q,size,bd);
1161 if (info->type == MUT_ARR_PTRS) {
1162 evacuate_mutable((StgMutClosure *)to);
1170 StgTSO *tso = stgCast(StgTSO *,q);
1171 nat size = tso_sizeW(tso);
1174 /* Large TSOs don't get moved, so no relocation is required.
1176 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1177 evacuate_large((P_)q, rtsTrue);
1180 /* To evacuate a small TSO, we need to relocate the update frame
1184 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1186 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1188 /* relocate the stack pointers... */
1189 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1190 new_tso->sp = (StgPtr)new_tso->sp + diff;
1191 new_tso->splim = (StgPtr)new_tso->splim + diff;
1193 relocate_TSO(tso, new_tso);
1194 upd_evacuee(q,(StgClosure *)new_tso);
/* TSOs are always mutable */
1196 evacuate_mutable((StgMutClosure *)new_tso);
1197 return (StgClosure *)new_tso;
1203 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1207 barf("evacuate: strange closure type");
1213 /* -----------------------------------------------------------------------------
1214 relocate_TSO is called just after a TSO has been copied from src to
1215 dest. It adjusts the update frame list for the new location.
1216 -------------------------------------------------------------------------- */
/* Called just after a TSO has been copied from src to dest: walks the
 * update-frame chain in dest's stack, adding the word offset between
 * the two copies to every frame-link pointer so the chain points into
 * the new stack.
 */
1219 relocate_TSO(StgTSO *src, StgTSO *dest)
1226 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
/* walk frames until we fall off the top of the new stack */
1230 while ((P_)su < dest->stack + dest->stack_size) {
1231 switch (get_itbl(su)->type) {
1233 /* GCC actually manages to common up these three cases! */
1236 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1241 cf = (StgCatchFrame *)su;
1242 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1247 sf = (StgSeqFrame *)su;
1248 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1257 barf("relocate_TSO");
/* Evacuate every closure referenced from an info table's SRT (static
 * reference table).  Safe when srt_len is 0: the loop below never
 * dereferences the (possibly absent) srt field.
 */
1266 scavenge_srt(const StgInfoTable *info)
1268 StgClosure **srt, **srt_end;
1270 /* evacuate the SRT. If srt_len is zero, then there isn't an
1271 * srt field in the info table. That's ok, because we'll
1272 * never dereference it.
1274 srt = stgCast(StgClosure **,info->srt);
1275 srt_end = srt + info->srt_len;
1276 for (; srt < srt_end; srt++) {
1281 /* -----------------------------------------------------------------------------
1282 Scavenge a given step until there are no more objects in this step
1285 evac_gen is set by the caller to be either zero (for a step in a
1286 generation < N) or G where G is the generation of the step being
/* NOTE(review): this listing is elided -- braces, case labels and some
 * declarations are missing throughout; the comments below describe only
 * the code that is visible and should be checked against the full file. */
1289 We sometimes temporarily change evac_gen back to zero if we're
1290 scavenging a mutable object where early promotion isn't such a good
1292 -------------------------------------------------------------------------- */
/* Scavenge the to-space of a step: walk every object between the start of
 * to-space and the allocation pointer, evacuating each pointer found inside
 * it.  Evacuation may extend to-space, so the walk continues until the scan
 * pointer catches up with the allocation pointer (breadth-first copying). */
1296 scavenge(step *step)
1299 const StgInfoTable *info;
/* saved so case arms that temporarily set evac_gen = 0 can restore it */
1301 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1306 failed_to_evac = rtsFalse;
1308 /* scavenge phase - standard breadth-first scavenging of the
/* loop until the scan pointer reaches the allocation pointer */
1312 while (bd != step->hp_bd || p < step->hp) {
1314 /* If we're at the end of this block, move on to the next block */
1315 if (bd != step->hp_bd && p == bd->free) {
1321 q = p; /* save ptr to object */
/* sanity: p must point at something with a plausible info pointer */
1323 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1324 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1326 info = get_itbl((StgClosure *)p);
1327 switch (info -> type) {
/* BCO: evacuate each constant-pool pointer, then skip past the object */
1331 StgBCO* bco = stgCast(StgBCO*,p);
1333 for (i = 0; i < bco->n_ptrs; i++) {
1334 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1336 p += bco_sizeW(bco);
1341 /* treat MVars specially, because we don't want to evacuate the
1342 * mut_link field in the middle of the closure.
1345 StgMVar *mvar = ((StgMVar *)p);
1347 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1348 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1349 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1350 p += sizeofW(StgMVar);
1351 evac_gen = saved_evac_gen;
1364 case IND_OLDGEN_PERM:
/* generic pointers-first layout: evacuate the ptrs, then skip the nptrs */
1370 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1371 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1372 (StgClosure *)*p = evacuate((StgClosure *)*p);
1374 p += info->layout.payload.nptrs;
1379 /* ignore MUT_CONSs */
1380 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1382 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1383 evac_gen = saved_evac_gen;
1385 p += sizeofW(StgMutVar);
1390 p += BLACKHOLE_sizeW();
/* BLACKHOLE_BQ: follow the queue of blocked threads; if something could
 * not be promoted far enough, record the closure as mutable instead */
1395 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1396 (StgClosure *)bh->blocking_queue =
1397 evacuate((StgClosure *)bh->blocking_queue);
1398 if (failed_to_evac) {
1399 failed_to_evac = rtsFalse;
1400 evacuate_mutable((StgMutClosure *)bh);
1402 p += BLACKHOLE_sizeW();
1406 case THUNK_SELECTOR:
1408 StgSelector *s = (StgSelector *)p;
1409 s->selectee = evacuate(s->selectee);
1410 p += THUNK_SELECTOR_sizeW();
/* plain INDs are expected to have been short-circuited before we get here */
1416 barf("scavenge:IND???\n");
1418 case CONSTR_INTLIKE:
1419 case CONSTR_CHARLIKE:
1421 case CONSTR_NOCAF_STATIC:
1425 /* Shouldn't see a static object here. */
1426 barf("scavenge: STATIC object\n");
1438 /* Shouldn't see stack frames here. */
1439 barf("scavenge: stack frame\n");
1441 case AP_UPD: /* same as PAPs */
1443 /* Treat a PAP just like a section of stack, not forgetting to
1444 * evacuate the function pointer too...
1447 StgPAP* pap = stgCast(StgPAP*,p);
1449 pap->fun = evacuate(pap->fun);
1450 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1451 p += pap_sizeW(pap);
1457 /* nothing to follow */
1458 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1462 /* follow everything */
1466 evac_gen = 0; /* repeatedly mutable */
1467 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1468 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1469 (StgClosure *)*p = evacuate((StgClosure *)*p);
1471 evac_gen = saved_evac_gen;
1475 case MUT_ARR_PTRS_FROZEN:
1476 /* follow everything */
1478 StgPtr start = p, next;
1480 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1481 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1482 (StgClosure *)*p = evacuate((StgClosure *)*p);
1484 if (failed_to_evac) {
1485 /* we can do this easier... */
1486 evacuate_mutable((StgMutClosure *)start);
1487 failed_to_evac = rtsFalse;
1498 /* chase the link field for any TSOs on the same queue */
1499 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1500 /* scavenge this thread's stack */
1501 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1502 evac_gen = saved_evac_gen;
1503 p += tso_sizeW(tso);
1510 barf("scavenge: unimplemented/strange closure type\n");
1516 /* If we didn't manage to promote all the objects pointed to by
1517 * the current object, then we have to designate this object as
1518 * mutable (because it contains old-to-new generation pointers).
1520 if (failed_to_evac) {
1521 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1522 failed_to_evac = rtsFalse;
1530 /* -----------------------------------------------------------------------------
1531 Scavenge one object.
1533 This is used for objects that are temporarily marked as mutable
1534 because they contain old-to-new generation pointers. Only certain
1535 objects can have this property.
1536 -------------------------------------------------------------------------- */
/* NOTE(review): elided listing -- some case labels/braces are missing.
 * Scavenges the single object at p and (per the final two visible lines)
 * reports via no_luck whether any contained pointer failed to evacuate to
 * the target generation, resetting the global failed_to_evac flag. */
1538 scavenge_one(StgPtr p)
1543 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1544 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1546 info = get_itbl((StgClosure *)p);
1548 switch (info -> type) {
1556 case IND_OLDGEN_PERM:
/* pointers-first layout: evacuate each pointer in the payload */
1562 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1563 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1564 (StgClosure *)*p = evacuate((StgClosure *)*p);
1573 case THUNK_SELECTOR:
1575 StgSelector *s = (StgSelector *)p;
1576 s->selectee = evacuate(s->selectee);
1580 case AP_UPD: /* same as PAPs */
1582 /* Treat a PAP just like a section of stack, not forgetting to
1583 * evacuate the function pointer too...
1586 StgPAP* pap = stgCast(StgPAP*,p);
1588 pap->fun = evacuate(pap->fun);
1589 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1594 /* This might happen if for instance a MUT_CONS was pointing to a
1595 * THUNK which has since been updated. The IND_OLDGEN will
1596 * be on the mutable list anyway, so we don't need to do anything
1602 barf("scavenge_one: strange object");
/* hand back whether promotion failed, and clear the global flag */
1605 no_luck = failed_to_evac;
1606 failed_to_evac = rtsFalse;
1611 /* -----------------------------------------------------------------------------
1612 Scavenging mutable lists.
1614 We treat the mutable list of each generation > N (i.e. all the
1615 generations older than the one being collected) as roots. We also
1616 remove non-mutable objects from the mutable list at this point.
1617 -------------------------------------------------------------------------- */
/* NOTE(review): elided listing -- case labels, braces and some statements
 * are missing; comments below describe the visible code only.
 * Walks generation `gen`'s mutable list starting at p, scavenging each
 * entry.  `prev` is a pointer-to-link used to unlink entries in place:
 * entries proven no-longer-mutable are dropped (*prev = p->mut_link),
 * others are kept (prev = &p->mut_link). */
1619 static StgMutClosure *
1620 scavenge_mutable_list(StgMutClosure *p, nat gen)
1623 StgMutClosure *start;
1624 StgMutClosure **prev;
1631 failed_to_evac = rtsFalse;
1633 for (; p != END_MUT_LIST; p = *prev) {
1635 /* make sure the info pointer is into text space */
1636 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1637 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1640 switch(info->type) {
1642 case MUT_ARR_PTRS_FROZEN:
1643 /* remove this guy from the mutable list, but follow the ptrs
1644 * anyway (and make sure they get promoted to this gen).
1649 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1651 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1652 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* couldn't promote everything: keep it on the list after all */
1656 if (failed_to_evac) {
1657 failed_to_evac = rtsFalse;
1658 prev = &p->mut_link;
1660 *prev = p->mut_link;
/* MUT_ARR_PTRS (still mutable): scavenge but always stay on the list */
1666 /* follow everything */
1667 prev = &p->mut_link;
1671 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1672 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1673 (StgClosure *)*q = evacuate((StgClosure *)*q);
1679 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1680 * it from the mutable list if possible by promoting whatever it
1683 if (p->header.info == &MUT_CONS_info) {
1685 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1686 /* didn't manage to promote everything, so leave the
1687 * MUT_CONS on the list.
1689 prev = &p->mut_link;
1691 *prev = p->mut_link;
/* a real MUT_VAR: evacuate its contents, keep it on the list */
1695 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1696 prev = &p->mut_link;
/* MVar: evacuate head/tail/value; MVars are always mutable, so keep */
1702 StgMVar *mvar = (StgMVar *)p;
1703 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1704 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1705 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1706 prev = &p->mut_link;
1711 /* follow ptrs and remove this from the mutable list */
1713 StgTSO *tso = (StgTSO *)p;
1715 /* Don't bother scavenging if this thread is dead
1717 if (!(tso->whatNext == ThreadComplete ||
1718 tso->whatNext == ThreadKilled)) {
1719 /* Don't need to chase the link field for any TSOs on the
1720 * same queue. Just scavenge this thread's stack
1722 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1725 /* Don't take this TSO off the mutable list - it might still
1726 * point to some younger objects (because we set evac_gen to 0
1729 prev = &tso->mut_link;
1734 case IND_OLDGEN_PERM:
1736 /* Try to pull the indirectee into this generation, so we can
1737 * remove the indirection from the mutable list.
1740 ((StgIndOldGen *)p)->indirectee =
1741 evacuate(((StgIndOldGen *)p)->indirectee);
1744 if (failed_to_evac) {
1745 failed_to_evac = rtsFalse;
1746 prev = &p->mut_link;
1748 *prev = p->mut_link;
1749 /* the mut_link field of an IND_STATIC is overloaded as the
1750 * static link field too (it just so happens that we don't need
1751 * both at the same time), so we need to NULL it out when
1752 * removing this object from the mutable list because the static
1753 * link fields are all assumed to be NULL before doing a major
/* BLACKHOLE_BQ: follow the blocked-thread queue; always keep on list */
1762 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1763 (StgClosure *)bh->blocking_queue =
1764 evacuate((StgClosure *)bh->blocking_queue);
1765 prev = &p->mut_link;
1770 /* shouldn't have anything else on the mutables list */
1771 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the static-object list built up during evacuation.  Static
 * objects are always promoted straight to the oldest generation.  Each
 * object is moved from static_objects onto scavenged_static_objects
 * *before* being scavenged, because scavenging may push new objects onto
 * static_objects.  (NOTE(review): elided listing -- some lines missing.) */
1778 scavenge_static(void)
1780 StgClosure* p = static_objects;
1781 const StgInfoTable *info;
1783 /* Always evacuate straight to the oldest generation for static
1785 evac_gen = oldest_gen->no;
1787 /* keep going until we've scavenged all the objects on the linked
1789 while (p != END_OF_STATIC_LIST) {
1793 /* make sure the info pointer is into text space */
1794 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1795 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1797 /* Take this object *off* the static_objects list,
1798 * and put it on the scavenged_static_objects list.
1800 static_objects = STATIC_LINK(info,p);
1801 STATIC_LINK(info,p) = scavenged_static_objects;
1802 scavenged_static_objects = p;
1804 switch (info -> type) {
/* IND_STATIC: follow the indirectee */
1808 StgInd *ind = (StgInd *)p;
1809 ind->indirectee = evacuate(ind->indirectee);
1811 /* might fail to evacuate it, in which case we have to pop it
1812 * back on the mutable list (and take it off the
1813 * scavenged_static list because the static link and mut link
1814 * pointers are one and the same).
1816 if (failed_to_evac) {
1817 failed_to_evac = rtsFalse;
1818 scavenged_static_objects = STATIC_LINK(info,p);
1819 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1820 oldest_gen->mut_list = (StgMutClosure *)ind;
/* constructor-like static object: evacuate its pointer fields */
1834 next = (P_)p->payload + info->layout.payload.ptrs;
1835 /* evacuate the pointers */
1836 for (q = (P_)p->payload; q < next; q++) {
1837 (StgClosure *)*q = evacuate((StgClosure *)*q);
1843 barf("scavenge_static");
/* everything here goes to the oldest gen, so promotion can't fail */
1846 ASSERT(failed_to_evac == rtsFalse);
1848 /* get the next static object from the list. Remember, there might
1849 * be more stuff on this list now that we've done some evacuating!
1850 * (static_objects is a global)
1856 /* -----------------------------------------------------------------------------
1857 scavenge_stack walks over a section of stack and evacuates all the
1858 objects pointed to by it. We can use the same code for walking
1859 PAPs, since these are just sections of copied stack.
1860 -------------------------------------------------------------------------- */
/* NOTE(review): elided listing -- braces, case labels and several
 * statements are missing; comments describe the visible code only. */
1863 scavenge_stack(StgPtr p, StgPtr stack_end)
1866 const StgInfoTable* info;
1870 * Each time around this loop, we are looking at a chunk of stack
1871 * that starts with either a pending argument section or an
1872 * activation record.
1875 while (p < stack_end) {
1876 q = *stgCast(StgPtr*,p);
1878 /* If we've got a tag, skip over that many words on the stack */
1879 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1884 /* Is q a pointer to a closure?
1886 if (! LOOKS_LIKE_GHC_INFO(q)) {
1889 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1890 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1892 /* otherwise, must be a pointer into the allocation space.
/* pending argument: evacuate the closure it points to */
1896 (StgClosure *)*p = evacuate((StgClosure *)q);
1902 * Otherwise, q must be the info pointer of an activation
1903 * record. All activation records have 'bitmap' style layout
1906 info = get_itbl(stgCast(StgClosure*,p));
1908 switch (info->type) {
1910 /* Dynamic bitmap: the mask is stored on the stack */
1912 bitmap = stgCast(StgRetDyn*,p)->liveness;
1913 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1916 /* probably a slow-entry point return address: */
1922 /* Specialised code for update frames, since they're so common.
1923 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
1924 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
1928 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1930 StgClosureType type = get_itbl(frame->updatee)->type;
1932 p += sizeofW(StgUpdateFrame);
/* already evacuated: evacuate() will just follow the forwarding ptr */
1933 if (type == EVACUATED) {
1934 frame->updatee = evacuate(frame->updatee);
1937 bdescr *bd = Bdescr((P_)frame->updatee);
/* updatee already lives in a generation not being collected */
1938 if (bd->gen->no > N) {
1939 if (bd->gen->no < evac_gen) {
1940 failed_to_evac = rtsTrue;
/* copy just the header part of a BLACKHOLE_BQ-style updatee */
1947 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
1948 sizeofW(StgHeader), bd);
1949 upd_evacuee(frame->updatee,to);
1950 frame->updatee = to;
1953 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
1954 upd_evacuee(frame->updatee,to);
1955 frame->updatee = to;
/* the copied blocking queue is mutable -- record it */
1956 evacuate_mutable((StgMutClosure *)to);
1959 barf("scavenge_stack: UPDATE_FRAME updatee");
1964 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
1971 bitmap = info->layout.bitmap;
/* a 0 bit means the slot holds a live pointer -- evacuate it */
1974 while (bitmap != 0) {
1975 if ((bitmap & 1) == 0) {
1976 (StgClosure *)*p = evacuate((StgClosure *)*p);
1979 bitmap = bitmap >> 1;
1986 /* large bitmap (> 32 entries) */
1991 StgLargeBitmap *large_bitmap;
1994 large_bitmap = info->layout.large_bitmap;
/* process one word of the bitmap (sizeof(W_)*8 stack slots) at a time */
1997 for (i=0; i<large_bitmap->size; i++) {
1998 bitmap = large_bitmap->bitmap[i];
1999 q = p + sizeof(W_) * 8;
2000 while (bitmap != 0) {
2001 if ((bitmap & 1) == 0) {
2002 (StgClosure *)*p = evacuate((StgClosure *)*p);
2005 bitmap = bitmap >> 1;
/* slots past the set bits of a non-final word are all pointers */
2007 if (i+1 < large_bitmap->size) {
2009 (StgClosure *)*p = evacuate((StgClosure *)*p);
2015 /* and don't forget to follow the SRT */
2020 barf("scavenge_stack: weird activation record found on stack.\n");
2025 /*-----------------------------------------------------------------------------
2026 scavenge the large object list.
2028 evac_gen set by caller; similar games played with evac_gen as with
2029 scavenge() - see comment at the top of scavenge(). Most large
2030 objects are (repeatedly) mutable, so most of the time evac_gen will
2032 --------------------------------------------------------------------------- */
/* NOTE(review): elided listing -- case labels/braces are missing below. */
2035 scavenge_large(step *step)
2039 const StgInfoTable* info;
2040 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2042 evac_gen = 0; /* most objects are mutable */
2043 bd = step->new_large_objects;
/* new_large_objects behaves as a stack: evacuation pushes, we pop */
2045 for (; bd != NULL; bd = step->new_large_objects) {
2047 /* take this object *off* the large objects list and put it on
2048 * the scavenged large objects list. This is so that we can
2049 * treat new_large_objects as a stack and push new objects on
2050 * the front when evacuating.
2052 step->new_large_objects = bd->link;
2053 dbl_link_onto(bd, &step->scavenged_large_objects);
2056 info = get_itbl(stgCast(StgClosure*,p));
2058 switch (info->type) {
2060 /* only certain objects can be "large"... */
2064 /* nothing to follow */
2068 /* follow everything */
2072 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2073 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2074 (StgClosure *)*p = evacuate((StgClosure *)*p);
2079 case MUT_ARR_PTRS_FROZEN:
2080 /* follow everything */
2082 StgPtr start = p, next;
2084 evac_gen = saved_evac_gen; /* not really mutable */
2085 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2086 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2087 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* frozen array held an old-to-new pointer after all: keep it mutable */
2090 if (failed_to_evac) {
2091 evacuate_mutable((StgMutClosure *)start);
/* BCO: evacuate the constant pool (BCOs are immutable once built) */
2098 StgBCO* bco = stgCast(StgBCO*,p);
2100 evac_gen = saved_evac_gen;
2101 for (i = 0; i < bco->n_ptrs; i++) {
2102 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2113 /* chase the link field for any TSOs on the same queue */
2114 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2115 /* scavenge this thread's stack */
2116 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2121 barf("scavenge_large: unknown/strange object");
/* Reset the static link field of every object on a static-object list to
 * NULL, so the next GC starts from a clean slate (objects are "on the
 * list" iff their static link is non-NULL -- see header comment). */
2127 zeroStaticObjectList(StgClosure* first_static)
2131 const StgInfoTable *info;
/* grab the link before zeroing it, then advance */
2133 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2135 link = STATIC_LINK(info, p);
2136 STATIC_LINK(info,p) = NULL;
2140 /* This function is only needed because we share the mutable link
2141 * field with the static link field in an IND_STATIC, so we have to
2142 * zero the mut_link field before doing a major GC, which needs the
2143 * static link field.
2145 * It doesn't do any harm to zero all the mutable link fields on the
/* Walk a mutable list and NULL out each mut_link field.
 * (NOTE(review): elided listing -- the loop body that saves c->mut_link
 * into `next` and zeroes it is not visible here.) */
2149 zeroMutableList(StgMutClosure *first)
2151 StgMutClosure *next, *c;
2153 for (c = first; c != END_MUT_LIST; c = next) {
2159 /* -----------------------------------------------------------------------------
2161 -------------------------------------------------------------------------- */
/* Revert every entered CAF back to the unentered state, unlinking the
 * whole enteredCAFs list.  The value/link fields are stomped with
 * 0xdeadbeef so any stale use is caught quickly in a debugger. */
2163 void RevertCAFs(void)
2165 while (enteredCAFs != END_CAF_LIST) {
2166 StgCAF* caf = enteredCAFs;
2168 enteredCAFs = caf->link;
2169 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2170 SET_INFO(caf,&CAF_UNENTERED_info);
2171 caf->value = stgCast(StgClosure*,0xdeadbeef);
2172 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* Rebuild the enteredCAFs list keeping only CAFs that survived GC
 * (i.e. were evacuated); dead ones are reverted to the unentered state.
 * (NOTE(review): elided listing -- some case labels are missing.) */
2176 void revertDeadCAFs(void)
2178 StgCAF* caf = enteredCAFs;
2179 enteredCAFs = END_CAF_LIST;
2180 while (caf != END_CAF_LIST) {
2181 StgCAF* next = caf->link;
2183 switch(GET_INFO(caf)->type) {
2186 /* This object has been evacuated, it must be live. */
/* follow the forwarding pointer and re-link the new copy */
2187 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2188 new->link = enteredCAFs;
/* not evacuated: the CAF is dead -- revert it and poison its fields */
2194 SET_INFO(caf,&CAF_UNENTERED_info);
2195 caf->value = stgCast(StgClosure*,0xdeadbeef);
2196 caf->link = stgCast(StgCAF*,0xdeadbeef);
2200 barf("revertDeadCAFs: enteredCAFs list corrupted");
2206 /* -----------------------------------------------------------------------------
2207 Sanity code for CAF garbage collection.
2209 With DEBUG turned on, we manage a CAF list in addition to the SRT
2210 mechanism. After GC, we run down the CAF list and blackhole any
2211 CAFs which have been garbage collected. This means we get an error
2212 whenever the program tries to enter a garbage collected CAF.
2214 Any garbage collected CAFs are taken off the CAF list at the same
2216 -------------------------------------------------------------------------- */
/* NOTE(review): the function signature is elided from this listing --
 * presumably the DEBUG-only CAF sweep (gcCAFs); confirm against the
 * full source.  A NULL static link after GC means the CAF was not
 * scavenged, i.e. it is garbage: blackhole it and unlink it. */
2224 const StgInfoTable *info;
2235 ASSERT(info->type == IND_STATIC);
2237 if (STATIC_LINK(info,p) == NULL) {
2238 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
/* dead CAF: any future entry now triggers an immediate error */
2240 SET_INFO(p,&BLACKHOLE_info);
2241 p = STATIC_LINK2(info,p);
/* live CAF: keep it and advance the unlink cursor */
2245 pp = &STATIC_LINK2(info,p);
2252 /* fprintf(stderr, "%d CAFs live\n", i); */
2256 /* -----------------------------------------------------------------------------
2259 Whenever a thread returns to the scheduler after possibly doing
2260 some work, we have to run down the stack and black-hole all the
2261 closures referred to by update frames.
2262 -------------------------------------------------------------------------- */
/* Walk the chain of update frames on tso's stack (via tso->su) and turn
 * each not-yet-blackholed updatee into a BLACKHOLE.
 * (NOTE(review): elided listing -- some case labels/braces are missing.) */
2265 threadLazyBlackHole(StgTSO *tso)
2267 StgUpdateFrame *update_frame;
2268 StgBlockingQueue *bh;
2271 stack_end = &tso->stack[tso->stack_size];
2272 update_frame = tso->su;
2275 switch (get_itbl(update_frame)->type) {
/* catch frames don't have an updatee; just follow the link */
2278 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2282 bh = (StgBlockingQueue *)update_frame->updatee;
2284 /* if the thunk is already blackholed, it means we've also
2285 * already blackholed the rest of the thunks on this stack,
2286 * so we can stop early.
2289 /* Don't for now: when we enter a CAF, we create a black hole on
2290 * the heap and make the update frame point to it. Thus the
2291 * above optimisation doesn't apply.
/* only overwrite if it isn't already some flavour of black hole */
2293 if (bh->header.info != &BLACKHOLE_info
2294 && bh->header.info != &BLACKHOLE_BQ_info
2295 && bh->header.info != &CAF_BLACKHOLE_info) {
2296 SET_INFO(bh,&BLACKHOLE_info);
2299 update_frame = update_frame->link;
2303 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2309 barf("threadPaused");
2314 /* -----------------------------------------------------------------------------
2317 * Code largely pinched from old RTS, then hacked to bits. We also do
2318 * lazy black holing here.
2320 * -------------------------------------------------------------------------- */
/* Squeeze out adjacent empty update frames from tso's stack, sliding the
 * intervening stack words up by the accumulated displacement, and do lazy
 * black-holing of the remaining update frames on the way.
 * (NOTE(review): elided listing -- several statements/braces are missing;
 * comments below describe only what is visible.) */
2323 threadSqueezeStack(StgTSO *tso)
2325 lnat displacement = 0;
2326 StgUpdateFrame *frame;
2327 StgUpdateFrame *next_frame; /* Temporally next */
2328 StgUpdateFrame *prev_frame; /* Temporally previous */
2330 rtsBool prev_was_update_frame;
2332 bottom = &(tso->stack[tso->stack_size]);
2335 /* There must be at least one frame, namely the STOP_FRAME.
2337 ASSERT((P_)frame < bottom);
2339 /* Walk down the stack, reversing the links between frames so that
2340 * we can walk back up as we squeeze from the bottom. Note that
2341 * next_frame and prev_frame refer to next and previous as they were
2342 * added to the stack, rather than the way we see them in this
2343 * walk. (It makes the next loop less confusing.)
2345 * Could stop if we find an update frame pointing to a black hole,
2346 * but see comment in threadLazyBlackHole().
/* pass 1: reverse the su-chain links in place */
2350 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2351 prev_frame = frame->link;
2352 frame->link = next_frame;
2357 /* Now, we're at the bottom. Frame points to the lowest update
2358 * frame on the stack, and its link actually points to the frame
2359 * above. We have to walk back up the stack, squeezing out empty
2360 * update frames and turning the pointers back around on the way
2363 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2364 * we never want to eliminate it anyway. Just walk one step up
2365 * before starting to squeeze. When you get to the topmost frame,
2366 * remember that there are still some words above it that might have
2373 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2376 * Loop through all of the frames (everything except the very
2377 * bottom). Things are complicated by the fact that we have
2378 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2379 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
/* pass 2: walk back up, squeezing and sliding */
2381 while (frame != NULL) {
2383 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2384 rtsBool is_update_frame;
2386 next_frame = frame->link;
2387 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2390 * 1. both the previous and current frame are update frames
2391 * 2. the current frame is empty
/* "empty" = the previous frame sits directly on top of this one
 * (accounting for words already slid up by `displacement`) */
2393 if (prev_was_update_frame && is_update_frame &&
2394 (P_)prev_frame == frame_bottom + displacement) {
2396 /* Now squeeze out the current frame */
2397 StgClosure *updatee_keep = prev_frame->updatee;
2398 StgClosure *updatee_bypass = frame->updatee;
2401 fprintf(stderr, "squeezing frame at %p\n", frame);
2404 /* Deal with blocking queues. If both updatees have blocked
2405 * threads, then we should merge the queues into the update
2406 * frame that we're keeping.
2408 * Alternatively, we could just wake them up: they'll just go
2409 * straight to sleep on the proper blackhole! This is less code
2410 * and probably less bug prone, although it's probably much
2413 #if 0 /* do it properly... */
2414 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2415 /* Sigh. It has one. Don't lose those threads! */
2416 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2417 /* Urgh. Two queues. Merge them. */
2418 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2420 while (keep_tso->link != END_TSO_QUEUE) {
2421 keep_tso = keep_tso->link;
2423 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2426 /* For simplicity, just swap the BQ for the BH */
2427 P_ temp = updatee_keep;
2429 updatee_keep = updatee_bypass;
2430 updatee_bypass = temp;
2432 /* Record the swap in the kept frame (below) */
2433 prev_frame->updatee = updatee_keep;
2438 TICK_UPD_SQUEEZED();
2439 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2441 sp = (P_)frame - 1; /* sp = stuff to slide */
2442 displacement += sizeofW(StgUpdateFrame);
2445 /* No squeeze for this frame */
2446 sp = frame_bottom - 1; /* Keep the current frame */
2448 /* Do lazy black-holing.
2450 if (is_update_frame) {
2451 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
/* only blackhole updatees not already blackholed (cf. threadLazyBlackHole) */
2452 if (bh->header.info != &BLACKHOLE_info
2453 && bh->header.info != &BLACKHOLE_BQ_info
2454 && bh->header.info != &CAF_BLACKHOLE_info
2456 SET_INFO(bh,&BLACKHOLE_info);
2460 /* Fix the link in the current frame (should point to the frame below) */
2461 frame->link = prev_frame;
2462 prev_was_update_frame = is_update_frame;
2465 /* Now slide all words from sp up to the next frame */
2467 if (displacement > 0) {
2468 P_ next_frame_bottom;
2470 if (next_frame != NULL)
2471 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2473 next_frame_bottom = tso->sp - 1;
2476 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* copy downwards-growing stack words up by `displacement` slots */
2480 while (sp >= next_frame_bottom) {
2481 sp[displacement] = *sp;
2485 (P_)prev_frame = (P_)frame + displacement;
/* finally: account for the squeezed-out words in sp and su */
2489 tso->sp += displacement;
2490 tso->su = prev_frame;
2493 /* -----------------------------------------------------------------------------
2496 * We have to prepare for GC - this means doing lazy black holing
2497 * here. We also take the opportunity to do stack squeezing if it's
2499 * -------------------------------------------------------------------------- */
/* Called when a thread returns to the scheduler: either squeeze the stack
 * (which black-holes as a side effect) or just do lazy black-holing,
 * depending on the squeezeUpdFrames RTS flag. */
2502 threadPaused(StgTSO *tso)
2504 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2505 threadSqueezeStack(tso); /* does black holing too */
2507 threadLazyBlackHole(tso);