1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.10 1999/01/18 12:23:04 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
85 static rtsBool failed_to_evac;
87 /* -----------------------------------------------------------------------------
88 Static function declarations
89 -------------------------------------------------------------------------- */
91 static StgClosure *evacuate(StgClosure *q);
92 static void zeroStaticObjectList(StgClosure* first_static);
93 static rtsBool traverse_weak_ptr_list(void);
94 static void zeroMutableList(StgMutClosure *first);
95 static void revertDeadCAFs(void);
97 static void scavenge_stack(StgPtr p, StgPtr stack_end);
98 static void scavenge_large(step *step);
99 static void scavenge(step *step);
100 static void scavenge_static(void);
101 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
104 static void gcCAFs(void);
107 /* -----------------------------------------------------------------------------
110 For garbage collecting generation N (and all younger generations):
112 - follow all pointers in the root set. the root set includes all
113 mutable objects in all steps in all generations.
115 - for each pointer, evacuate the object it points to into either
116 + to-space in the next higher step in that generation, if one exists,
117 + if the object's generation == N, then evacuate it to the next
118 generation if one exists, or else to-space in the current
120 + if the object's generation < N, then evacuate it to to-space
121 in the next generation.
123 - repeatedly scavenge to-space from each step in each generation
124 being collected until no more objects can be evacuated.
126 - free from-space in each step, and set from-space = to-space.
128 -------------------------------------------------------------------------- */
130 void GarbageCollect(void (*get_roots)(void))
/* Perform a garbage collection.  get_roots is a client callback that
 * evacuates the application's root set (via MarkRoot).  Visible phases:
 * decide N (the oldest generation to collect), initialise to-space,
 * follow roots (mutable lists of older generations, then get_roots),
 * scavenge repeatedly until no work remains, process weak pointers,
 * tidy up every generation/step, and report statistics.
 */
134 lnat live, allocated, collected = 0;
138 CostCentreStack *prev_CCS;
141 /* tell the stats department that we've started a GC */
144 /* attribute any costs to CCS_GC */
150 /* We might have been called from Haskell land by _ccall_GC, in
151 * which case we need to call threadPaused() because the scheduler
152 * won't have done it.
154 if (CurrentTSO) { threadPaused(CurrentTSO); }
156 /* Approximate how much we allocated: number of blocks in the
157 * nursery + blocks allocated via allocate() - unused nursery blocks.
158 * This leaves a little slop at the end of each block, and doesn't
159 * take into account large objects (ToDo).
161 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
162 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
163 allocated -= BLOCK_SIZE_W;
166 /* Figure out which generation to collect
/* N becomes the oldest generation whose step 0 has filled up to its
 * block quota; a "major" GC is when every generation is collected.
 */
168 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
169 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
173 major_gc = (N == RtsFlags.GcFlags.generations-1);
175 /* check stack sanity *before* GC (ToDo: check all threads) */
176 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
177 IF_DEBUG(sanity, checkFreeListSanity());
179 /* Initialise the static object lists
181 static_objects = END_OF_STATIC_LIST;
182 scavenged_static_objects = END_OF_STATIC_LIST;
184 /* zero the mutable list for the oldest generation (see comment by
185 * zeroMutableList below).
188 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
191 /* Initialise to-space in all the generations/steps that we're
194 for (g = 0; g <= N; g++) {
195 generations[g].mut_list = END_MUT_LIST;
197 for (s = 0; s < generations[g].n_steps; s++) {
198 /* generation 0, step 0 doesn't need to-space */
199 if (g == 0 && s == 0) { continue; }
200 /* Get a free block for to-space. Extra blocks will be chained on
204 step = &generations[g].steps[s];
205 ASSERT(step->gen->no == g);
206 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
207 bd->gen = &generations[g];
210 bd->evacuated = 1; /* it's a to-space block */
211 step->hp = bd->start;
212 step->hpLim = step->hp + BLOCK_SIZE_W;
215 step->to_blocks = 1; /* ???? */
216 step->scan = bd->start;
218 step->new_large_objects = NULL;
219 step->scavenged_large_objects = NULL;
220 /* mark the large objects as not evacuated yet */
221 for (bd = step->large_objects; bd; bd = bd->link) {
227 /* make sure the older generations have at least one block to
228 * allocate into (this makes things easier for copy(), see below.
230 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
231 for (s = 0; s < generations[g].n_steps; s++) {
232 step = &generations[g].steps[s];
233 if (step->hp_bd == NULL) {
235 bd->gen = &generations[g];
238 bd->evacuated = 0; /* *not* a to-space block */
239 step->hp = bd->start;
240 step->hpLim = step->hp + BLOCK_SIZE_W;
245 /* Set the scan pointer for older generations: remember we
246 * still have to scavenge objects that have been promoted. */
247 step->scan = step->hp;
248 step->scan_bd = step->hp_bd;
249 step->to_space = NULL;
251 step->new_large_objects = NULL;
252 step->scavenged_large_objects = NULL;
254 /* retain these so we can sanity-check later on */
255 step->old_scan = step->scan;
256 step->old_scan_bd = step->scan_bd;
261 /* -----------------------------------------------------------------------
262 * follow all the roots that the application knows about.
267 /* follow all the roots that we know about:
268 * - mutable lists from each generation > N
269 * we want to *scavenge* these roots, not evacuate them: they're not
270 * going to move in this GC.
271 * Also: do them in reverse generation order. This is because we
272 * often want to promote objects that are pointed to by older
273 * generations early, so we don't have to repeatedly copy them.
274 * Doing the generations in reverse order ensures that we don't end
275 * up in the situation where we want to evac an object to gen 3 and
276 * it has already been evaced to gen 2.
279 StgMutClosure *tmp, **pp;
280 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
281 /* the act of scavenging the mutable list for this generation
282 * might place more objects on the mutable list itself. So we
283 * place the current mutable list in a temporary, scavenge it,
284 * and then append it to the new list.
286 tmp = generations[g].mut_list;
287 generations[g].mut_list = END_MUT_LIST;
288 tmp = scavenge_mutable_list(tmp, g);
/* walk to the end of the new mut_list so tmp can be appended */
290 pp = &generations[g].mut_list;
291 while (*pp != END_MUT_LIST) {
292 pp = &(*pp)->mut_link;
297 /* And don't forget to mark the TSO if we got here direct from
300 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
303 /* Mark the weak pointer list, and prepare to detect dead weak
307 old_weak_ptr_list = weak_ptr_list;
308 weak_ptr_list = NULL;
309 weak_done = rtsFalse;
313 /* ToDo: To fix the caf leak, we need to make the commented out
314 * parts of this code do something sensible - as described in
317 extern void markHugsObjects(void);
319 /* ToDo: This (undefined) function should contain the scavenge
320 * loop immediately below this block of code - but I'm not sure
321 * enough of the details to do this myself.
323 scavengeEverything();
324 /* revert dead CAFs and update enteredCAFs list */
329 /* This will keep the CAFs and the attached BCOs alive
330 * but the values will have been reverted
332 scavengeEverything();
337 /* -------------------------------------------------------------------------
338 * Repeatedly scavenge all the areas we know about until there's no
339 * more scavenging to be done.
346 /* scavenge static objects */
347 if (major_gc && static_objects != END_OF_STATIC_LIST) {
351 /* When scavenging the older generations: Objects may have been
352 * evacuated from generations <= N into older generations, and we
353 * need to scavenge these objects. We're going to try to ensure that
354 * any evacuations that occur move the objects into at least the
355 * same generation as the object being scavenged, otherwise we
356 * have to create new entries on the mutable list for the older
360 /* scavenge each step in generations 0..maxgen */
363 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
364 for (s = 0; s < generations[gen].n_steps; s++) {
365 step = &generations[gen].steps[s];
/* step still has unscanned objects if the scan pointer trails hp */
367 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
371 if (step->new_large_objects != NULL) {
372 scavenge_large(step);
378 if (flag) { goto loop; }
380 /* must be last... */
381 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
386 /* Set the maximum blocks for the oldest generation, based on twice
387 * the amount of live data now, adjusted to fit the maximum heap
390 * This is an approximation, since in the worst case we'll need
391 * twice the amount of live data plus whatever space the other
394 oldest_gen->max_blocks =
395 stg_max(oldest_gen->steps[0].to_blocks * 2,
396 RtsFlags.GcFlags.minAllocAreaSize * 4);
397 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
398 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
399 if (((int)oldest_gen->max_blocks - (int)oldest_gen->steps[0].to_blocks) <
400 (RtsFlags.GcFlags.pcFreeHeap *
401 RtsFlags.GcFlags.maxHeapSize / 200)) {
406 /* run through all the generations/steps and tidy up
408 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
411 generations[g].collections++; /* for stats */
414 for (s = 0; s < generations[g].n_steps; s++) {
416 step = &generations[g].steps[s];
418 if (!(g == 0 && s == 0)) {
419 /* Tidy the end of the to-space chains */
420 step->hp_bd->free = step->hp;
421 step->hp_bd->link = NULL;
424 /* for generations we collected... */
427 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
429 /* free old memory and shift to-space into from-space for all
430 * the collected steps (except the allocation area). These
431 * freed blocks will probably be quickly recycled.
433 if (!(g == 0 && s == 0)) {
434 freeChain(step->blocks);
435 step->blocks = step->to_space;
436 step->n_blocks = step->to_blocks;
437 step->to_space = NULL;
439 for (bd = step->blocks; bd != NULL; bd = bd->link) {
440 bd->evacuated = 0; /* now from-space */
444 /* LARGE OBJECTS. The current live large objects are chained on
445 * scavenged_large, having been moved during garbage
446 * collection from large_objects. Any objects left on
447 * large_objects list are therefore dead, so we free them here.
449 for (bd = step->large_objects; bd != NULL; bd = next) {
454 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
457 step->large_objects = step->scavenged_large_objects;
459 /* Set the maximum blocks for this generation, interpolating
460 * between the maximum size of the oldest and youngest
463 * max_blocks = alloc_area_size +
464 * (oldgen_max_blocks - alloc_area_size) * G
465 * -----------------------------------------
469 generations[g].max_blocks =
470 RtsFlags.GcFlags.minAllocAreaSize +
471 (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
472 / (RtsFlags.GcFlags.generations-1));
475 /* for older generations... */
478 /* For older generations, we need to append the
479 * scavenged_large_object list (i.e. large objects that have been
480 * promoted during this GC) to the large_object list for that step.
482 for (bd = step->scavenged_large_objects; bd; bd = next) {
485 dbl_link_onto(bd, &step->large_objects);
488 /* add the new blocks we promoted during this GC */
489 step->n_blocks += step->to_blocks;
494 /* revert dead CAFs and update enteredCAFs list */
497 /* mark the garbage collected CAFs as dead */
499 if (major_gc) { gcCAFs(); }
502 /* zero the scavenged static object list */
504 zeroStaticObjectList(scavenged_static_objects)
509 for (bd = g0s0->blocks; bd; bd = bd->link) {
510 bd->free = bd->start;
511 ASSERT(bd->gen == g0);
512 ASSERT(bd->step == g0s0);
514 current_nursery = g0s0->blocks;
/* accumulate an approximate live-data figure for the stats report */
517 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
518 for (s = 0; s < generations[g].n_steps; s++) {
519 /* approximate amount of live data (doesn't take into account slop
520 * at end of each block). ToDo: this more accurately.
522 if (g == 0 && s == 0) { continue; }
523 step = &generations[g].steps[s];
524 live += step->n_blocks * BLOCK_SIZE_W +
525 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
529 /* Free the small objects allocated via allocate(), since this will
530 * all have been copied into G0S1 now.
532 if (small_alloc_list != NULL) {
533 freeChain(small_alloc_list);
535 small_alloc_list = NULL;
537 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
539 /* start any pending finalisers */
540 scheduleFinalisers(old_weak_ptr_list);
542 /* check sanity after GC */
544 for (g = 0; g <= N; g++) {
545 for (s = 0; s < generations[g].n_steps; s++) {
546 if (g == 0 && s == 0) { continue; }
547 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
548 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
551 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
552 for (s = 0; s < generations[g].n_steps; s++) {
553 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].old_scan_bd,
554 generations[g].steps[s].old_scan));
555 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
558 IF_DEBUG(sanity, checkFreeListSanity());
561 IF_DEBUG(gc, stat_describe_gens());
564 /* symbol-table based profiling */
565 /* heapCensus(to_space); */ /* ToDo */
568 /* restore enclosing cost centre */
573 /* check for memory leaks if sanity checking is on */
574 IF_DEBUG(sanity, memInventory());
576 /* ok, GC over: tell the stats department what happened. */
577 stat_endGC(allocated, collected, live, N);
580 /* -----------------------------------------------------------------------------
583 traverse_weak_ptr_list is called possibly many times during garbage
584 collection. It returns a flag indicating whether it did any work
585 (i.e. called evacuate on any live pointers).
587 Invariant: traverse_weak_ptr_list is called when the heap is in an
588 idempotent state. That means that there are no pending
589 evacuate/scavenge operations. This invariant helps the weak
590 pointer code decide which weak pointers are dead - if there are no
591 new live weak pointers, then all the currently unreachable ones are
594 For generational GC: we just don't try to finalise weak pointers in
595 older generations than the one we're collecting. This could
596 probably be optimised by keeping per-generation lists of weak
597 pointers, but for a few weak pointers this scheme will work.
598 -------------------------------------------------------------------------- */
601 traverse_weak_ptr_list(void)
/* Returns rtsTrue if this pass did any work (i.e. evacuated anything
 * through a live key) -- see the block comment above.  Weak pointers
 * whose keys are found alive are moved from old_weak_ptr_list back to
 * weak_ptr_list; whatever remains on old_weak_ptr_list once no more
 * progress is made becomes the pending-finaliser list.
 */
603 StgWeak *w, **last_w, *next_w;
605 const StgInfoTable *info;
606 rtsBool flag = rtsFalse;
608 if (weak_done) { return rtsFalse; }
610 /* doesn't matter where we evacuate values/finalisers to, since
611 * these pointers are treated as roots (iff the keys are alive).
615 last_w = &old_weak_ptr_list;
616 for (w = old_weak_ptr_list; w; w = next_w) {
619 /* ignore weak pointers in older generations */
620 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
621 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
622 /* remove this weak ptr from the old_weak_ptr list */
624 /* and put it on the new weak ptr list */
626 w->link = weak_ptr_list;
632 info = get_itbl(target);
633 switch (info->type) {
638 case IND_OLDGEN: /* rely on compatible layout with StgInd */
639 case IND_OLDGEN_PERM:
640 /* follow indirections */
641 target = ((StgInd *)target)->indirectee;
645 /* If key is alive, evacuate value and finaliser and
646 * place weak ptr on new weak ptr list.
648 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
649 w->key = ((StgEvacuated *)target)->evacuee;
650 w->value = evacuate(w->value);
651 w->finaliser = evacuate(w->finaliser);
653 /* remove this weak ptr from the old_weak_ptr list */
656 /* and put it on the new weak ptr list */
658 w->link = weak_ptr_list;
663 default: /* key is dead */
670 /* If we didn't make any changes, then we can go round and kill all
671 * the dead weak pointers. The old_weak_ptr list is used as a list
672 * of pending finalisers later on.
674 if (flag == rtsFalse) {
/* quiescent: keep values/finalisers of the dead weak ptrs alive so
 * the finalisers can still run after GC.
 */
675 for (w = old_weak_ptr_list; w; w = w->link) {
676 w->value = evacuate(w->value);
677 w->finaliser = evacuate(w->finaliser);
686 MarkRoot(StgClosure *root)
/* Evacuate a single root and return its new address; used directly on
 * CurrentTSO above and as the marking callback for get_roots().
 */
688 root = evacuate(root);
692 static inline void addBlock(step *step)
/* Chain a freshly-allocated block onto the given step and point
 * hp/hpLim into it.  Presumably called when the current allocation
 * block is exhausted (cf. the hpLim checks in copy()/mkMutCons()).
 */
694 bdescr *bd = allocBlock();
/* blocks for generations being collected (<= N) are to-space blocks */
698 if (step->gen->no <= N) {
/* close off the old block and link the new one on */
704 step->hp_bd->free = step->hp;
705 step->hp_bd->link = bd;
706 step->hp = bd->start;
707 step->hpLim = step->hp + BLOCK_SIZE_W;
712 static __inline__ StgClosure *
713 copy(StgClosure *src, W_ size, bdescr *bd)
/* Copy `size` words of closure `src` into to-space of the destination
 * step derived from `bd`, promoting to generations[evac_gen].steps[0]
 * when that destination is younger than evac_gen.  Returns the new
 * address of the closure.
 */
718 /* Find out where we're going, using the handy "to" pointer in
719 * the step of the source object. If it turns out we need to
720 * evacuate to an older generation, adjust it here (see comment
724 if (step->gen->no < evac_gen) {
725 step = &generations[evac_gen].steps[0];
728 /* chain a new block onto the to-space for the destination step if
731 if (step->hp + size >= step->hpLim) {
/* word-by-word copy into the destination */
737 for(to = dest, from = (P_)src; size>0; --size) {
740 return (StgClosure *)dest;
743 static __inline__ void
744 upd_evacuee(StgClosure *p, StgClosure *dest)
/* Overwrite `p` with an EVACUATED forwarding closure so that a later
 * evacuate() of `p` returns ((StgEvacuated*)p)->evacuee -- see the
 * EVACUATED case in evacuate().  (The store of `dest` into the evacuee
 * field is on a line elided from this view.)
 */
746 StgEvacuated *q = (StgEvacuated *)p;
748 SET_INFO(q,&EVACUATED_info);
752 /* -----------------------------------------------------------------------------
753 Evacuate a mutable object
755 If we evacuate a mutable object to an old generation, cons the
756 object onto the older generation's mutable list.
757 -------------------------------------------------------------------------- */
760 evacuate_mutable(StgMutClosure *c)
/* If `c` now resides in an old generation (gen > 0), cons it onto that
 * generation's mutable list so old-to-new pointers are tracked.
 */
765 if (bd->gen->no > 0) {
766 c->mut_link = bd->gen->mut_list;
767 bd->gen->mut_list = c;
771 /* -----------------------------------------------------------------------------
772 Evacuate a large object
774 This just consists of removing the object from the (doubly-linked)
775 large_alloc_list, and linking it on to the (singly-linked)
776 new_large_objects list, from where it will be scavenged later.
778 Convention: bd->evacuated is /= 0 for a large object that has been
779 evacuated, or 0 otherwise.
780 -------------------------------------------------------------------------- */
783 evacuate_large(StgPtr p, rtsBool mutable)
/* Evacuate the large object starting at `p` (which must be on a block
 * boundary): unlink its block descriptor from the source step's
 * doubly-linked large_objects list and push it onto the destination
 * step's singly-linked new_large_objects list for later scavenging.
 * `mutable` objects are additionally registered via evacuate_mutable().
 */
785 bdescr *bd = Bdescr(p);
788 /* should point to the beginning of the block */
789 ASSERT(((W_)p & BLOCK_MASK) == 0);
791 /* already evacuated? */
793 /* Don't forget to set the failed_to_evac flag if we didn't get
794 * the desired destination (see comments in evacuate()).
796 if (bd->gen->no < evac_gen) {
797 failed_to_evac = rtsTrue;
803 /* remove from large_object list */
805 bd->back->link = bd->link;
806 } else { /* first object in the list */
807 step->large_objects = bd->link;
810 bd->link->back = bd->back;
813 /* link it on to the evacuated large object list of the destination step
/* promote the destination if it's younger than the requested gen */
816 if (step->gen->no < evac_gen) {
817 step = &generations[evac_gen].steps[0];
822 bd->link = step->new_large_objects;
823 step->new_large_objects = bd;
827 evacuate_mutable((StgMutClosure *)p);
831 /* -----------------------------------------------------------------------------
832 Adding a MUT_CONS to an older generation.
834 This is necessary from time to time when we end up with an
835 old-to-new generation pointer in a non-mutable object. We defer
836 the promotion until the next GC.
837 -------------------------------------------------------------------------- */
840 mkMutCons(StgClosure *ptr, generation *gen)
/* Allocate a MUT_CONS cell in step 0 of `gen` wrapping `ptr`, and put
 * it on that generation's mutable list via evacuate_mutable().  Used
 * to defer promotion of an old-to-new pointer in a non-mutable object
 * until the next GC (see the block comment above).  Returns the cell.
 */
845 step = &gen->steps[0];
847 /* chain a new block onto the to-space for the destination step if
/* NOTE(review): the head-room check uses sizeofW(StgIndOldGen) but the
 * allocation below advances hp by sizeofW(StgMutVar) -- confirm the
 * former is >= the latter, otherwise this could overrun hpLim.
 */
850 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
854 q = (StgMutVar *)step->hp;
855 step->hp += sizeofW(StgMutVar);
857 SET_HDR(q,&MUT_CONS_info,CCS_GC);
859 evacuate_mutable((StgMutClosure *)q);
861 return (StgClosure *)q;
864 /* -----------------------------------------------------------------------------
867 This is called (eventually) for every live object in the system.
869 The caller to evacuate specifies a desired generation in the
870 evac_gen global variable. The following conditions apply to
871 evacuating an object which resides in generation M when we're
872 collecting up to generation N
876 else evac to step->to
878 if M < evac_gen evac to evac_gen, step 0
880 if the object is already evacuated, then we check which generation
883 if M >= evac_gen do nothing
884 if M < evac_gen set failed_to_evac flag to indicate that we
885 didn't manage to evacuate this object into evac_gen.
887 -------------------------------------------------------------------------- */
891 evacuate(StgClosure *q)
/* Evacuate closure `q`; returns its new address.  See the block
 * comment above for the rules relating evac_gen, the object's
 * generation, and the failed_to_evac flag.  Dispatches on closure
 * type: most heap closures are copied with copy(); large objects go
 * via evacuate_large(); static closures are threaded onto the static
 * object list instead of being moved; indirections are followed.
 */
895 const StgInfoTable *info;
898 if (!LOOKS_LIKE_STATIC(q)) {
900 if (bd->gen->no > N) {
901 /* Can't evacuate this object, because it's in a generation
902 * older than the ones we're collecting. Let's hope that it's
903 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
905 if (bd->gen->no < evac_gen) {
907 failed_to_evac = rtsTrue;
913 /* make sure the info pointer is into text space */
914 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
915 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
918 switch (info -> type) {
921 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
927 to = copy(q,sizeW_fromITBL(info),bd);
/* mutable closures must also go on the destination's mutable list */
929 evacuate_mutable((StgMutClosure *)to);
936 case IND_OLDGEN_PERM:
941 to = copy(q,sizeW_fromITBL(info),bd);
948 /* ToDo: don't need to copy all the blackhole, some of it is
951 to = copy(q,BLACKHOLE_sizeW(),bd);
/* THUNK_SELECTOR: try to short-circuit the selection on an already
 * evaluated selectee before falling back to copying the thunk.
 */
957 const StgInfoTable* selectee_info;
958 StgClosure* selectee = ((StgSelector*)q)->selectee;
961 selectee_info = get_itbl(selectee);
962 switch (selectee_info->type) {
966 StgNat32 offset = info->layout.selector_offset;
968 /* check that the size is in range */
970 (StgNat32)(selectee_info->layout.payload.ptrs +
971 selectee_info->layout.payload.nptrs));
973 /* perform the selection! */
974 q = selectee->payload[offset];
976 /* if we're already in to-space, there's no need to continue
977 * with the evacuation, just update the source address with
978 * a pointer to the (evacuated) constructor field.
980 if (IS_USER_PTR(q)) {
981 bdescr *bd = Bdescr((P_)q);
983 if (bd->gen->no < evac_gen) {
984 failed_to_evac = rtsTrue;
990 /* otherwise, carry on and evacuate this constructor field,
991 * (but not the constructor itself)
1000 case IND_OLDGEN_PERM:
1001 selectee = stgCast(StgInd *,selectee)->indirectee;
1005 selectee = stgCast(StgCAF *,selectee)->value;
1009 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1014 case THUNK_SELECTOR:
1015 /* aargh - do recursively???? */
1020 /* not evaluated yet */
1024 barf("evacuate: THUNK_SELECTOR: strange selectee");
1027 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1033 /* follow chains of indirections, don't evacuate them */
1034 q = ((StgInd*)q)->indirectee;
1037 /* ToDo: optimise STATIC_LINK for known cases.
1038 - FUN_STATIC : payload[0]
1039 - THUNK_STATIC : payload[1]
1040 - IND_STATIC : payload[1]
1044 if (info->srt_len == 0) { /* small optimisation */
1050 /* don't want to evacuate these, but we do want to follow pointers
1051 * from SRTs - see scavenge_static.
1054 /* put the object on the static list, if necessary.
1056 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1057 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1058 static_objects = (StgClosure *)q;
1062 case CONSTR_INTLIKE:
1063 case CONSTR_CHARLIKE:
1064 case CONSTR_NOCAF_STATIC:
1065 /* no need to put these on the static linked list, they don't need
1080 /* shouldn't see these */
1081 barf("evacuate: stack frame\n");
1085 /* these are special - the payload is a copy of a chunk of stack,
1087 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1092 /* Already evacuated, just return the forwarding address.
1093 * HOWEVER: if the requested destination generation (evac_gen) is
1094 * older than the actual generation (because the object was
1095 * already evacuated to a younger generation) then we have to
1096 * set the failed_to_evac flag to indicate that we couldn't
1097 * manage to promote the object to the desired generation.
1099 if (evac_gen > 0) { /* optimisation */
1100 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1101 if (Bdescr((P_)p)->gen->no < evac_gen) {
1102 /* fprintf(stderr,"evac failed!\n");*/
1103 failed_to_evac = rtsTrue;
1106 return ((StgEvacuated*)q)->evacuee;
1111 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1113 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1114 evacuate_large((P_)q, rtsFalse);
1117 /* just copy the block */
1118 to = copy(q,size,bd);
1125 case MUT_ARR_PTRS_FROZEN:
1127 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1129 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
/* only the non-frozen variant is treated as mutable */
1130 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1133 /* just copy the block */
1134 to = copy(q,size,bd);
1136 if (info->type == MUT_ARR_PTRS) {
1137 evacuate_mutable((StgMutClosure *)to);
1145 StgTSO *tso = stgCast(StgTSO *,q);
1146 nat size = tso_sizeW(tso);
1149 /* Large TSOs don't get moved, so no relocation is required.
1151 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1152 evacuate_large((P_)q, rtsFalse);
1153 tso->mut_link = NULL; /* see below */
1156 /* To evacuate a small TSO, we need to relocate the update frame
1160 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1162 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1164 /* relocate the stack pointers... */
1165 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1166 new_tso->sp = (StgPtr)new_tso->sp + diff;
1167 new_tso->splim = (StgPtr)new_tso->splim + diff;
1169 relocate_TSO(tso, new_tso);
1170 upd_evacuee(q,(StgClosure *)new_tso);
1172 /* don't evac_mutable - these things are marked mutable as
1173 * required. We *do* need to zero the mut_link field, though:
1174 * this TSO might have been on the mutable list for this
1175 * generation, but we're collecting this generation anyway so
1176 * we didn't follow the mutable list.
1178 new_tso->mut_link = NULL;
1180 return (StgClosure *)new_tso;
1186 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1190 barf("evacuate: strange closure type");
1196 /* -----------------------------------------------------------------------------
1197 relocate_TSO is called just after a TSO has been copied from src to
1198 dest. It adjusts the update frame list for the new location.
1199 -------------------------------------------------------------------------- */
1202 relocate_TSO(StgTSO *src, StgTSO *dest)
/* Called just after a TSO is copied from src to dest: walk the update
 * frame chain in dest's stack and shift every frame-link pointer by
 * the (word) distance between the two stacks.
 */
1209 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1213 while ((P_)su < dest->stack + dest->stack_size) {
1214 switch (get_itbl(su)->type) {
1216 /* GCC actually manages to common up these three cases! */
1219 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1224 cf = (StgCatchFrame *)su;
1225 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1230 sf = (StgSeqFrame *)su;
1231 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
/* anything else on the frame chain is a corrupted stack */
1240 barf("relocate_TSO");
1249 scavenge_srt(const StgInfoTable *info)
/* Evacuate every closure referenced from the info table's SRT
 * (static reference table), entries [srt, srt + srt_len).
 */
1251 StgClosure **srt, **srt_end;
1253 /* evacuate the SRT. If srt_len is zero, then there isn't an
1254 * srt field in the info table. That's ok, because we'll
1255 * never dereference it.
1257 srt = stgCast(StgClosure **,info->srt);
1258 srt_end = srt + info->srt_len;
1259 for (; srt < srt_end; srt++) {
1264 /* -----------------------------------------------------------------------------
1265 Scavenge a given step until there are no more objects in this step
1268 evac_gen is set by the caller to be either zero (for a step in a
1269 generation < N) or G where G is the generation of the step being
1272 We sometimes temporarily change evac_gen back to zero if we're
1273 scavenging a mutable object where early promotion isn't such a good
1275 -------------------------------------------------------------------------- */
/* Breadth-first scavenge of a step's to-space: walk every object between
 * the step's blocks and step->hp, dispatch on its closure type, and
 * evacuate each pointer field.  Objects found to retain old-to-new
 * pointers (failed_to_evac) are recorded on a mutable list via mkMutCons.
 * NOTE(review): this extract is gappy — the embedded original line numbers
 * are non-contiguous, so some case labels and braces are not visible here.
 */
1279 scavenge(step *step)
1282 const StgInfoTable *info;
/* saved so the mutable-object cases can temporarily set evac_gen = 0
 * and restore it afterwards */
1284 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1289 failed_to_evac = rtsFalse;
1291 /* scavenge phase - standard breadth-first scavenging of the
1295 while (bd != step->hp_bd || p < step->hp) {
1297 /* If we're at the end of this block, move on to the next block */
1298 if (bd != step->hp_bd && p == bd->free) {
1304 q = p; /* save ptr to object */
/* sanity: p must point at something with a plausible info table */
1306 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1307 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1309 info = get_itbl((StgClosure *)p);
1310 switch (info -> type) {
/* BCO: evacuate each constant pointer, then skip the whole object */
1314 StgBCO* bco = stgCast(StgBCO*,p);
1316 for (i = 0; i < bco->n_ptrs; i++) {
1317 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1319 p += bco_sizeW(bco);
1324 /* treat MVars specially, because we don't want to evacuate the
1325 * mut_link field in the middle of the closure.
1328 StgMVar *mvar = ((StgMVar *)p);
1330 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1331 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1332 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1333 p += sizeofW(StgMVar);
1334 evac_gen = saved_evac_gen;
1347 case IND_OLDGEN_PERM:
/* generic pointers-first layout: evacuate layout.payload.ptrs words,
 * then step over the nptrs non-pointer words */
1353 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1354 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1355 (StgClosure *)*p = evacuate((StgClosure *)*p);
1357 p += info->layout.payload.nptrs;
1362 /* ignore MUT_CONSs */
1363 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1365 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1366 evac_gen = saved_evac_gen;
1368 p += sizeofW(StgMutVar);
1373 p += BLACKHOLE_sizeW();
/* black hole with a blocking queue: follow the queue pointer
 * (presumably the BLACKHOLE_BQ case — its label is in a gap) */
1378 StgBlackHole *bh = (StgBlackHole *)p;
1379 (StgClosure *)bh->blocking_queue =
1380 evacuate((StgClosure *)bh->blocking_queue);
1381 p += BLACKHOLE_sizeW();
1385 case THUNK_SELECTOR:
1387 StgSelector *s = (StgSelector *)p;
1388 s->selectee = evacuate(s->selectee);
1389 p += THUNK_SELECTOR_sizeW();
/* plain indirections should have been shorted out during evacuation */
1395 barf("scavenge:IND???\n");
1397 case CONSTR_INTLIKE:
1398 case CONSTR_CHARLIKE:
1400 case CONSTR_NOCAF_STATIC:
1404 /* Shouldn't see a static object here. */
1405 barf("scavenge: STATIC object\n");
1417 /* Shouldn't see stack frames here. */
1418 barf("scavenge: stack frame\n");
1420 case AP_UPD: /* same as PAPs */
1422 /* Treat a PAP just like a section of stack, not forgetting to
1423 * evacuate the function pointer too...
1426 StgPAP* pap = stgCast(StgPAP*,p);
1428 pap->fun = evacuate(pap->fun);
1429 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1430 p += pap_sizeW(pap);
1436 /* nothing to follow */
1437 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1441 /* follow everything */
/* mutable array: scavenge with evac_gen = 0 because the array stays
 * (repeatedly) mutable and will be revisited via the mutable list */
1445 evac_gen = 0; /* repeatedly mutable */
1446 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1447 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1448 (StgClosure *)*p = evacuate((StgClosure *)*p);
1450 evac_gen = saved_evac_gen;
1454 case MUT_ARR_PTRS_FROZEN:
1455 /* follow everything */
1457 StgPtr start = p, next;
1459 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1460 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1461 (StgClosure *)*p = evacuate((StgClosure *)*p);
1463 if (failed_to_evac) {
1464 /* we can do this easier... */
/* frozen array ended up pointing at a younger generation: record it
 * as mutable so the old-to-new pointers get traced next GC */
1465 evacuate_mutable((StgMutClosure *)start);
1466 failed_to_evac = rtsFalse;
1477 /* chase the link field for any TSOs on the same queue */
1478 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1479 /* scavenge this thread's stack */
1480 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1481 evac_gen = saved_evac_gen;
1482 p += tso_sizeW(tso);
1489 barf("scavenge: unimplemented/strange closure type\n");
1495 /* If we didn't manage to promote all the objects pointed to by
1496 * the current object, then we have to designate this object as
1497 * mutable (because it contains old-to-new generation pointers).
1499 if (failed_to_evac) {
1500 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1501 failed_to_evac = rtsFalse;
1509 /* -----------------------------------------------------------------------------
1510 Scavenge one object.
1512 This is used for objects that are temporarily marked as mutable
1513 because they contain old-to-new generation pointers. Only certain
1514 objects can have this property.
1515 -------------------------------------------------------------------------- */
/* Scavenge a single closure at p, evacuating its pointer fields.
 * Returns (via no_luck, computed from failed_to_evac) whether some
 * pointee could not be promoted to evac_gen — the caller uses this to
 * decide whether to keep the object on the mutable list.
 * NOTE(review): extract is gappy — some case labels/braces are missing
 * between the numbered lines. */
1517 scavenge_one(StgPtr p)
1522 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1523 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1525 info = get_itbl((StgClosure *)p);
1527 switch (info -> type) {
1535 case IND_OLDGEN_PERM:
/* generic pointers-first layout: evacuate each pointer in the payload */
1541 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1542 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1543 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* black hole with blocking queue: follow the queue pointer */
1554 StgBlackHole *bh = (StgBlackHole *)p;
1555 (StgClosure *)bh->blocking_queue =
1556 evacuate((StgClosure *)bh->blocking_queue);
1560 case THUNK_SELECTOR:
1562 StgSelector *s = (StgSelector *)p;
1563 s->selectee = evacuate(s->selectee);
1567 case AP_UPD: /* same as PAPs */
1569 /* Treat a PAP just like a section of stack, not forgetting to
1570 * evacuate the function pointer too...
1573 StgPAP* pap = stgCast(StgPAP*,p);
1575 pap->fun = evacuate(pap->fun);
1576 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1581 /* This might happen if for instance a MUT_CONS was pointing to a
1582 * THUNK which has since been updated. The IND_OLDGEN will
1583 * be on the mutable list anyway, so we don't need to do anything
1589 barf("scavenge_one: strange object");
/* latch the failure flag for the caller and reset the global */
1592 no_luck = failed_to_evac;
1593 failed_to_evac = rtsFalse;
1598 /* -----------------------------------------------------------------------------
1599 Scavenging mutable lists.
1601 We treat the mutable list of each generation > N (i.e. all the
1602 generations older than the one being collected) as roots. We also
1603 remove non-mutable objects from the mutable list at this point.
1604 -------------------------------------------------------------------------- */
/* Walk the mutable list starting at p (belonging to generation gen),
 * scavenging each entry.  Entries that no longer retain old-to-new
 * pointers are unlinked from the list via *prev = p->mut_link; entries
 * that might still point into younger generations advance prev instead.
 * Returns the (possibly pruned) list head.
 * NOTE(review): extract is gappy — some case labels, braces and the
 * initialisation of start/prev/info are not visible here. */
1606 static StgMutClosure *
1607 scavenge_mutable_list(StgMutClosure *p, nat gen)
1610 StgMutClosure *start;
1611 StgMutClosure **prev;
1618 failed_to_evac = rtsFalse;
/* *prev is re-read each iteration: it is either the next list cell or,
 * after an unlink, the cell that replaced the removed one */
1620 for (; p != END_MUT_LIST; p = *prev) {
1622 /* make sure the info pointer is into text space */
1623 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1624 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1627 switch(info->type) {
1629 case MUT_ARR_PTRS_FROZEN:
1630 /* remove this guy from the mutable list, but follow the ptrs
1631 * anyway (and make sure they get promoted to this gen).
1636 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1638 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1639 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* couldn't promote everything: keep the frozen array on the list */
1643 if (failed_to_evac) {
1644 failed_to_evac = rtsFalse;
1645 prev = &p->mut_link;
1647 *prev = p->mut_link;
1653 /* follow everything */
/* genuinely mutable array: always stays on the mutable list */
1654 prev = &p->mut_link;
1658 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1659 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1660 (StgClosure *)*q = evacuate((StgClosure *)*q);
1666 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1667 * it from the mutable list if possible by promoting whatever it
1670 if (p->header.info == &MUT_CONS_info) {
1672 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1673 /* didn't manage to promote everything, so leave the
1674 * MUT_CONS on the list.
1676 prev = &p->mut_link;
1678 *prev = p->mut_link;
/* a real MUT_VAR: scavenge its contents, keep it on the list */
1682 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1683 prev = &p->mut_link;
1689 StgMVar *mvar = (StgMVar *)p;
1690 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1691 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1692 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1693 prev = &p->mut_link;
1698 /* follow ptrs and remove this from the mutable list */
1700 StgTSO *tso = (StgTSO *)p;
1702 /* Don't bother scavenging if this thread is dead
1704 if (!(tso->whatNext == ThreadComplete ||
1705 tso->whatNext == ThreadKilled)) {
1706 /* Don't need to chase the link field for any TSOs on the
1707 * same queue. Just scavenge this thread's stack
1709 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1712 /* Don't take this TSO off the mutable list - it might still
1713 * point to some younger objects (because we set evac_gen to 0
1716 prev = &tso->mut_link;
1721 case IND_OLDGEN_PERM:
1723 /* Try to pull the indirectee into this generation, so we can
1724 * remove the indirection from the mutable list.
1727 ((StgIndOldGen *)p)->indirectee =
1728 evacuate(((StgIndOldGen *)p)->indirectee);
1731 if (failed_to_evac) {
1732 failed_to_evac = rtsFalse;
1733 prev = &p->mut_link;
1735 *prev = p->mut_link;
1736 /* the mut_link field of an IND_STATIC is overloaded as the
1737 * static link field too (it just so happens that we don't need
1738 * both at the same time), so we need to NULL it out when
1739 * removing this object from the mutable list because the static
1740 * link fields are all assumed to be NULL before doing a major
1748 /* shouldn't have anything else on the mutables list */
1749 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the static_objects list (a global, see the file header):
 * repeatedly pop an object, move it to scavenged_static_objects, and
 * evacuate its pointer fields.  static_objects may grow while we work,
 * because evacuating a static object pushes it onto the list.
 * NOTE(review): extract is gappy — the loop-advance of p and some case
 * labels are not visible here. */
1756 scavenge_static(void)
1758 StgClosure* p = static_objects;
1759 const StgInfoTable *info;
1761 /* Always evacuate straight to the oldest generation for static
1763 evac_gen = oldest_gen->no;
1765 /* keep going until we've scavenged all the objects on the linked
1767 while (p != END_OF_STATIC_LIST) {
1771 /* make sure the info pointer is into text space */
1772 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1773 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1775 /* Take this object *off* the static_objects list,
1776 * and put it on the scavenged_static_objects list.
1778 static_objects = STATIC_LINK(info,p);
1779 STATIC_LINK(info,p) = scavenged_static_objects;
1780 scavenged_static_objects = p;
1782 switch (info -> type) {
/* IND_STATIC (presumably — its case label is in a gap): follow the
 * indirectee */
1786 StgInd *ind = (StgInd *)p;
1787 ind->indirectee = evacuate(ind->indirectee);
1789 /* might fail to evacuate it, in which case we have to pop it
1790 * back on the mutable list (and take it off the
1791 * scavenged_static list because the static link and mut link
1792 * pointers are one and the same).
1794 if (failed_to_evac) {
1795 failed_to_evac = rtsFalse;
1796 scavenged_static_objects = STATIC_LINK(info,p);
1797 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1798 oldest_gen->mut_list = (StgMutClosure *)ind;
/* generic static constructor/function: evacuate its pointer payload */
1812 next = (P_)p->payload + info->layout.payload.ptrs;
1813 /* evacuate the pointers */
1814 for (q = (P_)p->payload; q < next; q++) {
1815 (StgClosure *)*q = evacuate((StgClosure *)*q);
1821 barf("scavenge_static");
/* static objects always go to the oldest generation, so nothing can
 * have failed to be promoted by this point */
1824 ASSERT(failed_to_evac == rtsFalse);
1826 /* get the next static object from the list. Remeber, there might
1827 * be more stuff on this list now that we've done some evacuating!
1828 * (static_objects is a global)
1834 /* -----------------------------------------------------------------------------
1835 scavenge_stack walks over a section of stack and evacuates all the
1836 objects pointed to by it. We can use the same code for walking
1837 PAPs, since these are just sections of copied stack.
1838 -------------------------------------------------------------------------- */
/* Walk the stack words in [p, stack_end): skip argument tags, evacuate
 * pending-argument closure pointers, and decode activation records via
 * their info-table bitmaps (small, large, or dynamic).
 * NOTE(review): extract is gappy — some case labels, the p/q increments
 * inside the bitmap loops, and several braces are not visible here. */
1841 scavenge_stack(StgPtr p, StgPtr stack_end)
1844 const StgInfoTable* info;
1848 * Each time around this loop, we are looking at a chunk of stack
1849 * that starts with either a pending argument section or an
1850 * activation record.
1853 while (p < stack_end) {
1854 q = *stgCast(StgPtr*,p);
1856 /* If we've got a tag, skip over that many words on the stack */
1857 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1862 /* Is q a pointer to a closure?
1864 if (! LOOKS_LIKE_GHC_INFO(q)) {
1867 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1868 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1870 /* otherwise, must be a pointer into the allocation space.
/* pending argument that is a closure pointer: evacuate in place */
1874 (StgClosure *)*p = evacuate((StgClosure *)q);
1880 * Otherwise, q must be the info pointer of an activation
1881 * record. All activation records have 'bitmap' style layout
1884 info = get_itbl(stgCast(StgClosure*,p));
1886 switch (info->type) {
1888 /* Dynamic bitmap: the mask is stored on the stack */
1890 bitmap = stgCast(StgRetDyn*,p)->liveness;
1891 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1894 /* probably a slow-entry point return address: */
1900 /* Specialised code for update frames, since they're so common.
1901 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
1902 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
1906 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1908 StgClosureType type = get_itbl(frame->updatee)->type;
1910 p += sizeofW(StgUpdateFrame);
/* already evacuated: evacuate() will just follow the forwarding ptr */
1911 if (type == EVACUATED) {
1912 frame->updatee = evacuate(frame->updatee);
1915 bdescr *bd = Bdescr((P_)frame->updatee);
1916 ASSERT(type == BLACKHOLE ||
1917 type == CAF_BLACKHOLE ||
1918 type == BLACKHOLE_BQ);
/* updatee already lives in an older generation than the ones being
 * collected: leave it, but note if it breaks the evac_gen promise */
1919 if (bd->gen->no > N) {
1920 if (bd->gen->no < evac_gen) {
1921 failed_to_evac = rtsTrue;
/* inlined copy of the evacuate() fast path for black holes */
1925 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
1926 upd_evacuee(frame->updatee,to);
1927 frame->updatee = to;
1932 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
1939 bitmap = info->layout.bitmap;
/* a clear bit means "this word is a live pointer" */
1942 while (bitmap != 0) {
1943 if ((bitmap & 1) == 0) {
1944 (StgClosure *)*p = evacuate((StgClosure *)*p);
1947 bitmap = bitmap >> 1;
1954 /* large bitmap (> 32 entries) */
1959 StgLargeBitmap *large_bitmap;
1962 large_bitmap = info->layout.large_bitmap;
1965 for (i=0; i<large_bitmap->size; i++) {
1966 bitmap = large_bitmap->bitmap[i];
1967 q = p + sizeof(W_) * 8;
1968 while (bitmap != 0) {
1969 if ((bitmap & 1) == 0) {
1970 (StgClosure *)*p = evacuate((StgClosure *)*p);
1973 bitmap = bitmap >> 1;
/* words between the end of this bitmap word and the next are live */
1975 if (i+1 < large_bitmap->size) {
1977 (StgClosure *)*p = evacuate((StgClosure *)*p);
1983 /* and don't forget to follow the SRT */
1988 barf("scavenge_stack: weird activation record found on stack.\n");
1993 /*-----------------------------------------------------------------------------
1994 scavenge the large object list.
1996 evac_gen set by caller; similar games played with evac_gen as with
1997 scavenge() - see comment at the top of scavenge(). Most large
1998 objects are (repeatedly) mutable, so most of the time evac_gen will
2000 --------------------------------------------------------------------------- */
/* Pop objects off step->new_large_objects (which can grow while we work,
 * since evacuating can push more large objects onto it), move each to
 * the scavenged list, and evacuate its pointer fields.
 * NOTE(review): extract is gappy — some case labels and braces are not
 * visible here. */
2003 scavenge_large(step *step)
2007 const StgInfoTable* info;
2008 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2010 evac_gen = 0; /* most objects are mutable */
2011 bd = step->new_large_objects;
/* re-read the list head each iteration: it is used as a stack */
2013 for (; bd != NULL; bd = step->new_large_objects) {
2015 /* take this object *off* the large objects list and put it on
2016 * the scavenged large objects list. This is so that we can
2017 * treat new_large_objects as a stack and push new objects on
2018 * the front when evacuating.
2020 step->new_large_objects = bd->link;
2021 dbl_link_onto(bd, &step->scavenged_large_objects);
2024 info = get_itbl(stgCast(StgClosure*,p));
2026 switch (info->type) {
2028 /* only certain objects can be "large"... */
2032 /* nothing to follow */
2036 /* follow everything */
/* mutable array: scavenged with evac_gen = 0 (set above) */
2040 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2041 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2042 (StgClosure *)*p = evacuate((StgClosure *)*p);
2047 case MUT_ARR_PTRS_FROZEN:
2048 /* follow everything */
2050 StgPtr start = p, next;
2052 evac_gen = saved_evac_gen; /* not really mutable */
2053 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2054 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2055 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* frozen array retains an old-to-new pointer: re-register as mutable */
2058 if (failed_to_evac) {
2059 evacuate_mutable((StgMutClosure *)start);
2066 StgBCO* bco = stgCast(StgBCO*,p);
2068 evac_gen = saved_evac_gen;
2069 for (i = 0; i < bco->n_ptrs; i++) {
2070 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2081 /* chase the link field for any TSOs on the same queue */
2082 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2083 /* scavenge this thread's stack */
2084 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2089 barf("scavenge_large: unknown/strange object");
/* Reset the static link field of every object on a static-object list
 * to NULL, so that the list can be rebuilt on the next GC (an object is
 * "on the list" iff its link is non-NULL — see the file header). */
2095 zeroStaticObjectList(StgClosure* first_static)
2099 const StgInfoTable *info;
/* save the link before zeroing it, since it is the loop's next pointer */
2101 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2103 link = STATIC_LINK(info, p);
2104 STATIC_LINK(info,p) = NULL;
2108 /* This function is only needed because we share the mutable link
2109 * field with the static link field in an IND_STATIC, so we have to
2110 * zero the mut_link field before doing a major GC, which needs the
2111 * static link field.
2113 * It doesn't do any harm to zero all the mutable link fields on the
/* Walk a mutable list and zero every mut_link field (see the comment
 * above for why).  NOTE(review): the line saving c->mut_link into `next`
 * and the one zeroing it are in a gap of this extract. */
2117 zeroMutableList(StgMutClosure *first)
2119 StgMutClosure *next, *c;
2121 for (c = first; c != END_MUT_LIST; c = next) {
2127 /* -----------------------------------------------------------------------------
2129 -------------------------------------------------------------------------- */
/* Revert every CAF on the enteredCAFs list to the un-entered state,
 * poisoning the value and link fields with 0xdeadbeef so stale uses
 * are caught.  Empties the enteredCAFs list as it goes. */
2131 void RevertCAFs(void)
2133 while (enteredCAFs != END_CAF_LIST) {
2134 StgCAF* caf = enteredCAFs;
/* advance the list head before clobbering caf->link below */
2136 enteredCAFs = caf->link;
2137 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2138 SET_INFO(caf,&CAF_UNENTERED_info);
2139 caf->value = stgCast(StgClosure*,0xdeadbeef);
2140 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* After a GC, walk the old enteredCAFs list: CAFs that were evacuated
 * are live and are re-linked (via their new address) onto a fresh
 * enteredCAFs list; dead ones are reverted to un-entered and poisoned.
 * NOTE(review): extract is gappy — the case labels of the switch and
 * the loop-advance of caf are not visible here. */
2144 void revertDeadCAFs(void)
2146 StgCAF* caf = enteredCAFs;
2147 enteredCAFs = END_CAF_LIST;
2148 while (caf != END_CAF_LIST) {
2149 StgCAF* next = caf->link;
2151 switch(GET_INFO(caf)->type) {
2154 /* This object has been evacuated, it must be live. */
/* follow the forwarding pointer to the CAF's new location */
2155 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2156 new->link = enteredCAFs;
/* dead CAF: revert and poison, as in RevertCAFs() */
2162 SET_INFO(caf,&CAF_UNENTERED_info);
2163 caf->value = stgCast(StgClosure*,0xdeadbeef);
2164 caf->link = stgCast(StgCAF*,0xdeadbeef);
2168 barf("revertDeadCAFs: enteredCAFs list corrupted");
2174 /* -----------------------------------------------------------------------------
2175 Sanity code for CAF garbage collection.
2177 With DEBUG turned on, we manage a CAF list in addition to the SRT
2178 mechanism. After GC, we run down the CAF list and blackhole any
2179 CAFs which have been garbage collected. This means we get an error
2180 whenever the program tries to enter a garbage collected CAF.
2182 Any garbage collected CAFs are taken off the CAF list at the same
2184 -------------------------------------------------------------------------- */
/* NOTE(review): the function signature is in a gap of this extract;
 * presumably this is the DEBUG-only CAF sanity pass described above
 * (gcCAFs) — confirm against the full source. */
2192 const StgInfoTable *info;
2203 ASSERT(info->type == IND_STATIC);
/* a NULL static link means the CAF was not reached during this GC */
2205 if (STATIC_LINK(info,p) == NULL) {
2206 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
/* black-hole the dead CAF so any later entry fails loudly */
2208 SET_INFO(p,&BLACKHOLE_info);
2209 p = STATIC_LINK2(info,p);
/* live CAF: keep it on the list and advance the unlink pointer */
2213 pp = &STATIC_LINK2(info,p);
2220 /* fprintf(stderr, "%d CAFs live\n", i); */
2224 /* -----------------------------------------------------------------------------
2227 Whenever a thread returns to the scheduler after possibly doing
2228 some work, we have to run down the stack and black-hole all the
2229 closures referred to by update frames.
2230 -------------------------------------------------------------------------- */
/* Walk tso's update-frame chain (tso->su) and overwrite each updatee's
 * info pointer with BLACKHOLE_info unless it is already one of the
 * black-hole variants.  NOTE(review): extract is gappy — the case
 * labels of the switch and the loop construct are not visible here. */
2233 threadLazyBlackHole(StgTSO *tso)
2235 StgUpdateFrame *update_frame;
2239 stack_end = &tso->stack[tso->stack_size];
2240 update_frame = tso->su;
2243 switch (get_itbl(update_frame)->type) {
/* CATCH_FRAME (presumably): nothing to black-hole, just follow link */
2246 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2250 bh = stgCast(StgBlackHole*,update_frame->updatee);
2252 /* if the thunk is already blackholed, it means we've also
2253 * already blackholed the rest of the thunks on this stack,
2254 * so we can stop early.
2257 /* Don't for now: when we enter a CAF, we create a black hole on
2258 * the heap and make the update frame point to it. Thus the
2259 * above optimisation doesn't apply.
2261 if (bh->header.info != &BLACKHOLE_info
2262 && bh->header.info != &BLACKHOLE_BQ_info
2263 && bh->header.info != &CAF_BLACKHOLE_info) {
2264 SET_INFO(bh,&BLACKHOLE_info);
2267 update_frame = update_frame->link;
/* SEQ_FRAME (presumably): follow its link like a catch frame */
2271 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2277 barf("threadPaused");
2282 /* -----------------------------------------------------------------------------
2285 * Code largely pinched from old RTS, then hacked to bits. We also do
2286 * lazy black holing here.
2288 * -------------------------------------------------------------------------- */
/* Squeeze adjacent update frames out of tso's stack: reverse the frame
 * chain, walk back up merging consecutive UPDATE_FRAMEs (redirecting the
 * bypassed updatee with UPD_IND), slide the intervening stack words up
 * by the accumulated displacement, and lazily black-hole updatees on
 * the way.  Adjusts tso->sp and tso->su at the end.
 * NOTE(review): extract is gappy — several declarations, braces and
 * loop-advance lines are not visible here. */
2291 threadSqueezeStack(StgTSO *tso)
2293 lnat displacement = 0;
2294 StgUpdateFrame *frame;
2295 StgUpdateFrame *next_frame; /* Temporally next */
2296 StgUpdateFrame *prev_frame; /* Temporally previous */
2298 rtsBool prev_was_update_frame;
2300 bottom = &(tso->stack[tso->stack_size]);
2303 /* There must be at least one frame, namely the STOP_FRAME.
2305 ASSERT((P_)frame < bottom);
2307 /* Walk down the stack, reversing the links between frames so that
2308 * we can walk back up as we squeeze from the bottom. Note that
2309 * next_frame and prev_frame refer to next and previous as they were
2310 * added to the stack, rather than the way we see them in this
2311 * walk. (It makes the next loop less confusing.)
2313 * Could stop if we find an update frame pointing to a black hole,
2314 * but see comment in threadLazyBlackHole().
2318 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2319 prev_frame = frame->link;
2320 frame->link = next_frame;
2325 /* Now, we're at the bottom. Frame points to the lowest update
2326 * frame on the stack, and its link actually points to the frame
2327 * above. We have to walk back up the stack, squeezing out empty
2328 * update frames and turning the pointers back around on the way
2331 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2332 * we never want to eliminate it anyway. Just walk one step up
2333 * before starting to squeeze. When you get to the topmost frame,
2334 * remember that there are still some words above it that might have
2341 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2344 * Loop through all of the frames (everything except the very
2345 * bottom). Things are complicated by the fact that we have
2346 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2347 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2349 while (frame != NULL) {
2351 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2352 rtsBool is_update_frame;
2354 next_frame = frame->link;
2355 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2358 * 1. both the previous and current frame are update frames
2359 * 2. the current frame is empty
2361 if (prev_was_update_frame && is_update_frame &&
2362 (P_)prev_frame == frame_bottom + displacement) {
2364 /* Now squeeze out the current frame */
2365 StgClosure *updatee_keep = prev_frame->updatee;
2366 StgClosure *updatee_bypass = frame->updatee;
2369 fprintf(stderr, "squeezing frame at %p\n", frame);
2372 /* Deal with blocking queues. If both updatees have blocked
2373 * threads, then we should merge the queues into the update
2374 * frame that we're keeping.
2376 * Alternatively, we could just wake them up: they'll just go
2377 * straight to sleep on the proper blackhole! This is less code
2378 * and probably less bug prone, although it's probably much
2381 #if 0 /* do it properly... */
2382 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2383 /* Sigh. It has one. Don't lose those threads! */
2384 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2385 /* Urgh. Two queues. Merge them. */
2386 P_ keep_tso = ((StgBlackHole *)updatee_keep)->blocking_queue;
2388 while (keep_tso->link != END_TSO_QUEUE) {
2389 keep_tso = keep_tso->link;
2391 keep_tso->link = ((StgBlackHole *)updatee_bypass)->blocking_queue;
2394 /* For simplicity, just swap the BQ for the BH */
2395 P_ temp = updatee_keep;
2397 updatee_keep = updatee_bypass;
2398 updatee_bypass = temp;
2400 /* Record the swap in the kept frame (below) */
2401 prev_frame->updatee = updatee_keep;
/* redirect the bypassed updatee to the kept one; UPD_IND also wakes
 * any blocked threads */
2406 TICK_UPD_SQUEEZED();
2407 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2409 sp = (P_)frame - 1; /* sp = stuff to slide */
2410 displacement += sizeofW(StgUpdateFrame);
2413 /* No squeeze for this frame */
2414 sp = frame_bottom - 1; /* Keep the current frame */
2416 /* Do lazy black-holing.
2418 if (is_update_frame) {
2419 StgBlackHole *bh = (StgBlackHole *)frame->updatee;
2420 if (bh->header.info != &BLACKHOLE_info
2421 && bh->header.info != &BLACKHOLE_BQ_info
2422 && bh->header.info != &CAF_BLACKHOLE_info
2424 SET_INFO(bh,&BLACKHOLE_info);
2428 /* Fix the link in the current frame (should point to the frame below) */
2429 frame->link = prev_frame;
2430 prev_was_update_frame = is_update_frame;
2433 /* Now slide all words from sp up to the next frame */
2435 if (displacement > 0) {
2436 P_ next_frame_bottom;
2438 if (next_frame != NULL)
2439 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2441 next_frame_bottom = tso->sp - 1;
2444 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* copy downwards-in-address-order so source and destination ranges
 * can overlap safely */
2448 while (sp >= next_frame_bottom) {
2449 sp[displacement] = *sp;
2453 (P_)prev_frame = (P_)frame + displacement;
/* stack has shrunk by `displacement` words in total */
2457 tso->sp += displacement;
2458 tso->su = prev_frame;
2461 /* -----------------------------------------------------------------------------
2464 * We have to prepare for GC - this means doing lazy black holing
2465 * here. We also take the opportunity to do stack squeezing if it's
2467 * -------------------------------------------------------------------------- */
/* Called when a thread returns to the scheduler: either squeeze the
 * stack (which also black-holes) or just do lazy black-holing,
 * depending on the squeezeUpdFrames RTS flag. */
2470 threadPaused(StgTSO *tso)
2472 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2473 threadSqueezeStack(tso); /* does black holing too */
2475 threadLazyBlackHole(tso);