/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.12 1999/01/18 16:05:15 simonm Exp $
 *
 * Two-space garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "DebugProf.h"
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
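
/* Illustrative sketch (not from the original source): the push idiom
 * implied by the invariants above.  An object whose static link field
 * is NULL is not on the list yet, so we cons it onto the front;
 * END_OF_STATIC_LIST is the non-NULL end marker.
 */
#if 0
static void example_push_static(StgClosure *p, const StgInfoTable *info)
{
  if (STATIC_LINK(info,p) == NULL) {   /* not on the list yet */
    STATIC_LINK(info,p) = static_objects;
    static_objects = p;
  }
}
#endif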
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static StgClosure *evacuate(StgClosure *q);
static void    zeroStaticObjectList(StgClosure* first_static);
static rtsBool traverse_weak_ptr_list(void);
static void    zeroMutableList(StgMutClosure *first);
static void    revertDeadCAFs(void);

static void    scavenge_stack(StgPtr p, StgPtr stack_end);
static void    scavenge_large(step *step);
static void    scavenge(step *step);
static void    scavenge_static(void);
static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);

static void    gcCAFs(void);
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
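
/* A minimal sketch (illustrative, not part of the collector) of the
 * destination rules above, assuming the step/generation fields used
 * elsewhere in this file:
 */
#if 0
static step *example_evac_dest(bdescr *bd)
{
  /* each step keeps a handy "to" pointer: to-space in the next step
   * of this generation, or the first step of the next generation.
   */
  step *dest = bd->step->to;

  /* if the caller asked for promotion to an older generation
   * (evac_gen), override the default destination.
   */
  if (dest->gen->no < evac_gen) {
    dest = &generations[evac_gen].steps[0];
  }
  return dest;
}
#endif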
void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0;
  nat g, s;
  rtsBool flag;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* We might have been called from Haskell land by _ccall_GC, in
   * which case we need to call threadPaused() because the scheduler
   * won't have done it.
   */
  if (CurrentTSO) { threadPaused(CurrentTSO); }
  /* Approximate how much we allocated: number of blocks in the
   * nursery + blocks allocated via allocate() - unused nursery blocks.
   * This leaves a little slop at the end of each block, and doesn't
   * take into account large objects (ToDo).
   */
  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
    allocated -= BLOCK_SIZE_W;
  }
  /* Figure out which generation to collect
   */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
  /* check stack sanity *before* GC (ToDo: check all threads) */
  /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;
  /* zero the mutable list for the oldest generation (see comment by
   * zeroMutableList below).
   */
  zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {
      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0) { continue; }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;         /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1; /* ???? */
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;       /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    StgMutClosure *tmp, **pp;
    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
      pp = &generations[g].mut_list;
      while (*pp != END_MUT_LIST) {
        pp = &(*pp)->mut_link;
      }
      *pp = tmp;
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell land.
   */
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;
  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  {
    extern void markHugsObjects(void);

    /* ToDo: This (undefined) function should contain the scavenge
     * loop immediately below this block of code - but I'm not sure
     * enough of the details to do this myself.
     */
    scavengeEverything();

    /* revert dead CAFs and update enteredCAFs list */
    revertDeadCAFs();

    /* This will keep the CAFs and the attached BCOs alive
     * but the values will have been reverted
     */
    scavengeEverything();
  }
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    scavenge_static();
  }
  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */
  /* scavenge each step in generations 0..maxgen */
  {
    int gen;
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (s = 0; s < generations[gen].n_steps; s++) {
        step = &generations[gen].steps[s];
        if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
          scavenge(step);
          flag = rtsTrue;
        }
        if (step->new_large_objects != NULL) {
          scavenge_large(step);
          flag = rtsTrue;
        }
      }
    }
  }
  if (flag) { goto loop; }
  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * 2,
              RtsFlags.GcFlags.minAllocAreaSize * 4);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks - (int)oldest_gen->steps[0].to_blocks) <
          (RtsFlags.GcFlags.pcFreeHeap *
           RtsFlags.GcFlags.maxHeapSize / 200)) {
        heapOverflow();
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
      }
      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          step->to_blocks = 0;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0; /* now from-space */
          }
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =  alloc_area_size +
         *                  (oldgen_max_blocks - alloc_area_size) * G
         *                  -----------------------------------------
         *                         (number of generations - 1)
         *
         * where G is this generation's number.
         */
        generations[g].max_blocks =
          RtsFlags.GcFlags.minAllocAreaSize +
          (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
           / (RtsFlags.GcFlags.generations-1));
      } else { /* for older generations... */

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* revert dead CAFs and update enteredCAFs list */
  revertDeadCAFs();

  /* mark the garbage collected CAFs as dead */
#ifdef DEBUG
  if (major_gc) { gcCAFs(); }
#endif
  /* zero the scavenged static object list */
  if (major_gc) {
    zeroStaticObjectList(scavenged_static_objects);
  }
  /* Reset the nursery
   */
  for (bd = g0s0->blocks; bd; bd = bd->link) {
    bd->free = bd->start;
    ASSERT(bd->gen == g0);
    ASSERT(bd->step == g0s0);
  }
  current_nursery = g0s0->blocks;
  live = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      /* approximate amount of live data (doesn't take into account slop
       * at end of each block).  ToDo: do this more accurately.
       */
      if (g == 0 && s == 0) { continue; }
      step = &generations[g].steps[s];
      live += step->n_blocks * BLOCK_SIZE_W +
        ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
    }
  }
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* start any pending finalisers */
  scheduleFinalisers(old_weak_ptr_list);
  /* check sanity after GC */
  for (g = 0; g <= N; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      if (g == 0 && s == 0) { continue; }
      IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
      IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
    }
  }
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
                                 generations[g].steps[s].blocks->start));
      IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
    }
  }
  IF_DEBUG(sanity, checkFreeListSanity());
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalise weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
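
/* Illustrative sketch (not from the original source) of how the
 * collector drives this function: the scavenge loop in GarbageCollect()
 * above alternates between scavenging and weak pointer traversal until
 * traverse_weak_ptr_list() reports that nothing new was evacuated.
 */
#if 0
  for (;;) {
    scavenge_until_all_done();            /* hypothetical name for the */
                                          /* scavenge loop above       */
    if (!traverse_weak_ptr_list()) break; /* rtsFalse => no more work  */
  }
#endif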
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *target;
  const StgInfoTable *info;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }
  /* doesn't matter where we evacuate values/finalisers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;
  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {
    target = w->key;
  loop:
    /* ignore weak pointers in older generations */
    if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      continue;
    }
    info = get_itbl(target);
    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:      /* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      target = ((StgInd *)target)->indirectee;
      goto loop;
    case EVACUATED:
      /* If key is alive, evacuate value and finaliser and
       * place weak ptr on new weak ptr list.
       */
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
      w->key = ((StgEvacuated *)target)->evacuee;
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);

      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;

      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      break;
    default: /* key is dead */
      last_w = &w->link;
      next_w = w->link;
      break;
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalisers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->value = evacuate(w->value);
      w->finaliser = evacuate(w->finaliser);
    }
    weak_done = rtsTrue;
  }

  return rtsTrue;
}
StgClosure *
MarkRoot(StgClosure *root)
{
  root = evacuate(root);
  return root;
}
static inline void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, bdescr *bd)
{
  step *step;
  P_ to, from, dest;

  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size;
  for(to = dest, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
{
  step *step;
  P_ to, from, dest;

  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  for(to = dest, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }
  return (StgClosure *)dest;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  StgEvacuated *q = (StgEvacuated *)p;

  SET_INFO(q,&EVACUATED_info);
  q->evacuee = dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a mutable object

   If we evacuate a mutable object to an old generation, cons the
   object onto the older generation's mutable list.
   -------------------------------------------------------------------------- */

static void
evacuate_mutable(StgMutClosure *c)
{
  bdescr *bd;

  bd = Bdescr((P_)c);
  if (bd->gen->no > 0) {
    c->mut_link = bd->gen->mut_list;
    bd->gen->mut_list = c;
  }
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
    step = &generations[evac_gen].steps[0];
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    evacuate_mutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  evacuate_mutable((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= evac_gen
           if  M > N  then the object stays put (it lives in a
                      generation we're not collecting)
           else       evac to step->to

   if  M < evac_gen   evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen  do nothing
   if  M <  evac_gen  set failed_to_evac flag to indicate that we
                      didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
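
/* Worked example (illustrative): suppose we're collecting up to N = 1
 * and scavenging an object that lives in generation 2, so evac_gen == 2.
 * Evacuating one of its fields that points into generation 0 (M = 0,
 * M < evac_gen) copies the target straight to generation 2, step 0.
 * If that target had already been evacuated to generation 1, it can't
 * be moved again, so we just set failed_to_evac; the scavenger then
 * records the generation-2 object on a mutable list (via mkMutCons) so
 * the old-to-new pointer is found by the next GC.
 */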
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd;
  const StgInfoTable *info;

loop:
  if (!LOOKS_LIKE_STATIC(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
      }
      return q;
    }
  }

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info->type) {
  case BCO:
    to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
    upd_evacuee(q,to);
    return to;

  case MUT_VAR:
    to = copy(q,sizeW_fromITBL(info),bd);
    upd_evacuee(q,to);
    evacuate_mutable((StgMutClosure *)to);
    return to;

  case IND_PERM:
  case IND_OLDGEN_PERM:
    to = copy(q,sizeW_fromITBL(info),bd);
    upd_evacuee(q,to);
    return to;

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
    upd_evacuee(q,to);
    return to;

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),bd);
    upd_evacuee(q,to);
    evacuate_mutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

      case CONSTR:
      case CONSTR_STATIC:
        {
          StgNat32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgNat32)(selectee_info->layout.payload.ptrs +
                            selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (IS_USER_PTR(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = stgCast(StgInd *,selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = stgCast(StgCAF *,selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = stgCast(StgEvacuated*,selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case FUN:
      case FUN_STATIC:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee");
      }
    }
    to = copy(q,THUNK_SELECTOR_sizeW(),bd);
    upd_evacuee(q,to);
    return to;
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;
  /* ToDo: optimise STATIC_LINK for known cases.
     - FUN_STATIC    : payload[0]
     - THUNK_STATIC  : payload[1]
     - IND_STATIC    : payload[1]
   */

  case THUNK_STATIC:
  case FUN_STATIC:
    if (info->srt_len == 0) { /* small optimisation */
      return q;
    }
    /* fall through */
  case IND_STATIC:
  case CONSTR_STATIC:
    /* don't want to evacuate these, but we do want to follow pointers
     * from SRTs  - see scavenge_static.
     */

    /* put the object on the static list, if necessary.
     */
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame\n");
  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
     * tagging and all.
     */
    to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
    upd_evacuee(q,to);
    return to;
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) { /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        /* fprintf(stderr,"evac failed!\n"); */
        failed_to_evac = rtsTrue;
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    {
      nat size = arr_words_sizeW(stgCast(StgArrWords*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,bd);
        upd_evacuee(q,to);
        return to;
      }
    }
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        return q;
      } else {
        /* just copy the block */
        to = copy(q,size,bd);
        upd_evacuee(q,to);
        if (info->type == MUT_ARR_PTRS) {
          evacuate_mutable((StgMutClosure *)to);
        }
        return to;
      }
    }
  case TSO:
    {
      StgTSO *tso = stgCast(StgTSO *,q);
      nat size = tso_sizeW(tso);
      int diff;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      } else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);
        upd_evacuee(q,(StgClosure *)new_tso);

        evacuate_mutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
  default:
    fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
  }

  barf("evacuate: strange closure type");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO");
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = stgCast(StgClosure **,info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    evacuate(*srt);
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
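
/* This is essentially Cheney's algorithm: the scan pointer chases the
 * allocation pointer (hp) through to-space, and scavenging an object
 * may evacuate further objects, which advances hp.  When scan catches
 * up with hp, every object evacuated so far has also been scavenged,
 * and this step is done.
 */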
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }
    q = p; /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    switch (info->type) {
    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }
    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;
    case CAF_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;
    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          evacuate_mutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }
    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }
    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");
    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = stgCast(StgPAP*,p);

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }
    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW(stgCast(StgArrWords*,p));
      break;
    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0; /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          evacuate_mutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }
    case TSO:
      {
        StgTSO *tso;

        tso = (StgTSO *)p;
        evac_gen = 0;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }
    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }
    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
               || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

  info = get_itbl((StgClosure *)p);

  switch (info->type) {

  case FUN:
  case THUNK:
  case CONSTR:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr end;

      end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
      for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = stgCast(StgPAP*,p);

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
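
/* Example: a MUT_CONS planted by mkMutCons() stays on this list only
 * as long as scavenge_one() fails to pull the object it points to up
 * into this generation; once the promotion succeeds, the MUT_CONS is
 * unlinked (see the MUT_VAR case below).
 */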
static StgMutClosure *
scavenge_mutable_list(StgMutClosure *p, nat gen)
{
  const StgInfoTable *info;
  StgMutClosure *start;
  StgMutClosure **prev;

  evac_gen = 0;

  prev = &start;
  start = p;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = *prev) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    switch(info->type) {
    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        break;
      }
    case MUT_ARR_PTRS:
      /* follow everything */
      prev = &p->mut_link;
      {
        StgPtr end, q;

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }
    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      if (p->header.info == &MUT_CONS_info) {
        evac_gen = gen;
        if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
          /* didn't manage to promote everything, so leave the
           * MUT_CONS on the list.
           */
          prev = &p->mut_link;
        } else {
          *prev = p->mut_link;
        }
        evac_gen = 0;
      } else {
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        prev = &p->mut_link;
      }
      break;
    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        prev = &p->mut_link;
        break;
      }
    case TSO:
      /* follow ptrs and remove this from the mutable list */
      {
        StgTSO *tso = (StgTSO *)p;

        /* Don't bother scavenging if this thread is dead
         */
        if (!(tso->whatNext == ThreadComplete ||
              tso->whatNext == ThreadKilled)) {
          /* Don't need to chase the link field for any TSOs on the
           * same queue.  Just scavenge this thread's stack
           */
          scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        }

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        prev = &tso->mut_link;
        break;
      }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen;
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        prev = &p->mut_link;
      } else {
        *prev = p->mut_link;
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      break;
    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        prev = &p->mut_link;
        break;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_object: non-mutable object?");
    }
  }

  return start;
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info->type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
          oldest_gen->mut_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static");
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
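
/* Illustrative sketch (not from the original source) of the bitmap
 * convention used by the activation-record cases below: bit i of the
 * liveness bitmap describes stack word i, and a *clear* bit means
 * "this word is a pointer, follow it".  The real code below also
 * handles argument tags, large bitmaps and SRTs.
 */
#if 0
static void example_walk_small_bitmap(StgPtr p, StgNat32 bitmap)
{
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) {            /* clear bit => live pointer */
      (StgClosure *)*p = evacuate((StgClosure *)*p);
    }
    p++;
    bitmap = bitmap >> 1;
  }
}
#endif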
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgNat32 bitmap;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *stgCast(StgPtr*,p);

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG(stgCast(StgWord,q))) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q)) {

      if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
        ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
      }
      /* otherwise, must be a pointer into the allocation space.
       */

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl(stgCast(StgClosure*,p));

    switch (info->type) {
    /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = stgCast(StgRetDyn*,p)->liveness;
      p = &payloadWord(stgCast(StgRetDyn*,p),0);
      goto small_bitmap;

    /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      p++;
      goto follow_srt;
    /* Specialised code for update frames, since they're so common.
     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
     */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        StgClosureType type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }
          switch (type) {
          case CAF_BLACKHOLE:
          case BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), bd);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
            upd_evacuee(frame->updatee,to);
            frame->updatee = to;
            evacuate_mutable((StgMutClosure *)to);
            continue;
          default:
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
    /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      bitmap = info->layout.bitmap;
      p++;
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
    /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack.\n");
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0; /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl(stgCast(StgClosure*,p));
    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          evacuate_mutable((StgMutClosure *)start);
        }
        failed_to_evac = rtsFalse;
        evac_gen = 0;
        continue;
      }
    case BCO:
      {
        StgBCO* bco = stgCast(StgBCO*,p);
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }
    case TSO:
      {
        StgTSO *tso;

        tso = (StgTSO *)p;
        /* chase the link field for any TSOs on the same queue */
        (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
        /* scavenge this thread's stack */
        scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object");
    }
  }
}
static void
zeroStaticObjectList(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zeroMutableList(StgMutClosure *first)
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = stgCast(StgClosure*,0xdeadbeef);
    caf->link = stgCast(StgCAF*,0xdeadbeef);
  }
}
void revertDeadCAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF* next = caf->link;

    switch (GET_INFO(caf)->type) {
    case EVACUATED:
      {
        /* This object has been evacuated, it must be live. */
        StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
        new->link = enteredCAFs;
        enteredCAFs = new;
        break;
      }
    case CAF_ENTERED:
      {
        SET_INFO(caf,&CAF_UNENTERED_info);
        caf->value = stgCast(StgClosure*,0xdeadbeef);
        caf->link = stgCast(StgCAF*,0xdeadbeef);
        break;
      }
    default:
      barf("revertDeadCAFs: enteredCAFs list corrupted");
    }
    caf = next;
  }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = stgCast(StgCatchFrame*,update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       */

      /* Don't for now: when we enter a CAF, we create a black hole on
       * the heap and make the update frame point to it.  Thus the
       * above optimisation doesn't apply.
       */
      if (bh->header.info != &BLACKHOLE_info
          && bh->header.info != &BLACKHOLE_BQ_info
          && bh->header.info != &CAF_BLACKHOLE_info) {
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = stgCast(StgSeqFrame*,update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadPaused");
    }
  }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;   /* Temporally next */
  StgUpdateFrame *prev_frame;   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;

  bottom = &(tso->stack[tso->stack_size]);
  frame = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Could stop if we find an update frame pointing to a black hole,
   * but see comment in threadLazyBlackHole().
   */

  next_frame = NULL;
  while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if 0
      fprintf(stderr, "squeezing frame at %p\n", frame);
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif
      TICK_UPD_SQUEEZED();
      UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1; /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1; /* Keep the current frame */
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_info
            && bh->header.info != &BLACKHOLE_BQ_info
            && bh->header.info != &CAF_BLACKHOLE_info) {
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

#if 0
      fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
              displacement);
#endif

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso); /* does black holing too */
  else
    threadLazyBlackHole(tso);
}