1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.11 1999/01/18 15:21:37 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
85 static rtsBool failed_to_evac;
87 /* -----------------------------------------------------------------------------
88 Static function declarations
89 -------------------------------------------------------------------------- */
91 static StgClosure *evacuate(StgClosure *q);
92 static void zeroStaticObjectList(StgClosure* first_static);
93 static rtsBool traverse_weak_ptr_list(void);
94 static void zeroMutableList(StgMutClosure *first);
95 static void revertDeadCAFs(void);
97 static void scavenge_stack(StgPtr p, StgPtr stack_end);
98 static void scavenge_large(step *step);
99 static void scavenge(step *step);
100 static void scavenge_static(void);
101 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
104 static void gcCAFs(void);
107 /* -----------------------------------------------------------------------------
110 For garbage collecting generation N (and all younger generations):
112 - follow all pointers in the root set. the root set includes all
113 mutable objects in all steps in all generations.
115 - for each pointer, evacuate the object it points to into either
116 + to-space in the next higher step in that generation, if one exists,
117 + if the object's generation == N, then evacuate it to the next
118 generation if one exists, or else to-space in the current
120 + if the object's generation < N, then evacuate it to to-space
121 in the next generation.
123 - repeatedly scavenge to-space from each step in each generation
124 being collected until no more objects can be evacuated.
126 - free from-space in each step, and set from-space = to-space.
128 -------------------------------------------------------------------------- */
130 void GarbageCollect(void (*get_roots)(void))
134 lnat live, allocated, collected = 0;
138 CostCentreStack *prev_CCS;
141 /* tell the stats department that we've started a GC */
144 /* attribute any costs to CCS_GC */
150 /* We might have been called from Haskell land by _ccall_GC, in
151 * which case we need to call threadPaused() because the scheduler
152 * won't have done it.
154 if (CurrentTSO) { threadPaused(CurrentTSO); }
156 /* Approximate how much we allocated: number of blocks in the
157 * nursery + blocks allocated via allocate() - unused nusery blocks.
158 * This leaves a little slop at the end of each block, and doesn't
159 * take into account large objects (ToDo).
161 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
162 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
163 allocated -= BLOCK_SIZE_W;
166 /* Figure out which generation to collect
168 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
169 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
173 major_gc = (N == RtsFlags.GcFlags.generations-1);
175 /* check stack sanity *before* GC (ToDo: check all threads) */
176 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
177 IF_DEBUG(sanity, checkFreeListSanity());
179 /* Initialise the static object lists
181 static_objects = END_OF_STATIC_LIST;
182 scavenged_static_objects = END_OF_STATIC_LIST;
184 /* zero the mutable list for the oldest generation (see comment by
185 * zeroMutableList below).
188 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
191 /* Initialise to-space in all the generations/steps that we're
194 for (g = 0; g <= N; g++) {
195 generations[g].mut_list = END_MUT_LIST;
197 for (s = 0; s < generations[g].n_steps; s++) {
198 /* generation 0, step 0 doesn't need to-space */
199 if (g == 0 && s == 0) { continue; }
200 /* Get a free block for to-space. Extra blocks will be chained on
204 step = &generations[g].steps[s];
205 ASSERT(step->gen->no == g);
206 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
207 bd->gen = &generations[g];
210 bd->evacuated = 1; /* it's a to-space block */
211 step->hp = bd->start;
212 step->hpLim = step->hp + BLOCK_SIZE_W;
215 step->to_blocks = 1; /* ???? */
216 step->scan = bd->start;
218 step->new_large_objects = NULL;
219 step->scavenged_large_objects = NULL;
220 /* mark the large objects as not evacuated yet */
221 for (bd = step->large_objects; bd; bd = bd->link) {
227 /* make sure the older generations have at least one block to
228 * allocate into (this makes things easier for copy(), see below.
230 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
231 for (s = 0; s < generations[g].n_steps; s++) {
232 step = &generations[g].steps[s];
233 if (step->hp_bd == NULL) {
235 bd->gen = &generations[g];
238 bd->evacuated = 0; /* *not* a to-space block */
239 step->hp = bd->start;
240 step->hpLim = step->hp + BLOCK_SIZE_W;
245 /* Set the scan pointer for older generations: remember we
246 * still have to scavenge objects that have been promoted. */
247 step->scan = step->hp;
248 step->scan_bd = step->hp_bd;
249 step->to_space = NULL;
251 step->new_large_objects = NULL;
252 step->scavenged_large_objects = NULL;
256 /* -----------------------------------------------------------------------
257 * follow all the roots that we know about:
258 * - mutable lists from each generation > N
259 * we want to *scavenge* these roots, not evacuate them: they're not
260 * going to move in this GC.
261 * Also: do them in reverse generation order. This is because we
262 * often want to promote objects that are pointed to by older
263 * generations early, so we don't have to repeatedly copy them.
264 * Doing the generations in reverse order ensures that we don't end
265 * up in the situation where we want to evac an object to gen 3 and
266 * it has already been evaced to gen 2.
269 StgMutClosure *tmp, **pp;
270 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
271 generations[g].saved_mut_list = generations[g].mut_list;
272 generations[g].mut_list = END_MUT_LIST;
275 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
276 tmp = scavenge_mutable_list(generations[g].saved_mut_list, g);
277 pp = &generations[g].mut_list;
278 while (*pp != END_MUT_LIST) {
279 pp = &(*pp)->mut_link;
285 /* follow all the roots that the application knows about.
290 /* And don't forget to mark the TSO if we got here direct from
293 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
296 /* Mark the weak pointer list, and prepare to detect dead weak
300 old_weak_ptr_list = weak_ptr_list;
301 weak_ptr_list = NULL;
302 weak_done = rtsFalse;
306 /* ToDo: To fix the caf leak, we need to make the commented out
307 * parts of this code do something sensible - as described in
310 extern void markHugsObjects(void);
312 /* ToDo: This (undefined) function should contain the scavenge
313 * loop immediately below this block of code - but I'm not sure
314 * enough of the details to do this myself.
316 scavengeEverything();
317 /* revert dead CAFs and update enteredCAFs list */
322 /* This will keep the CAFs and the attached BCOs alive
323 * but the values will have been reverted
325 scavengeEverything();
330 /* -------------------------------------------------------------------------
331 * Repeatedly scavenge all the areas we know about until there's no
332 * more scavenging to be done.
339 /* scavenge static objects */
340 if (major_gc && static_objects != END_OF_STATIC_LIST) {
344 /* When scavenging the older generations: Objects may have been
345 * evacuated from generations <= N into older generations, and we
346 * need to scavenge these objects. We're going to try to ensure that
347 * any evacuations that occur move the objects into at least the
348 * same generation as the object being scavenged, otherwise we
349 * have to create new entries on the mutable list for the older
353 /* scavenge each step in generations 0..maxgen */
356 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
357 for (s = 0; s < generations[gen].n_steps; s++) {
358 step = &generations[gen].steps[s];
360 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
364 if (step->new_large_objects != NULL) {
365 scavenge_large(step);
371 if (flag) { goto loop; }
373 /* must be last... */
374 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
379 /* Set the maximum blocks for the oldest generation, based on twice
380 * the amount of live data now, adjusted to fit the maximum heap
383 * This is an approximation, since in the worst case we'll need
384 * twice the amount of live data plus whatever space the other
387 oldest_gen->max_blocks =
388 stg_max(oldest_gen->steps[0].to_blocks * 2,
389 RtsFlags.GcFlags.minAllocAreaSize * 4);
390 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
391 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
392 if (((int)oldest_gen->max_blocks - (int)oldest_gen->steps[0].to_blocks) <
393 (RtsFlags.GcFlags.pcFreeHeap *
394 RtsFlags.GcFlags.maxHeapSize / 200)) {
399 /* run through all the generations/steps and tidy up
401 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
404 generations[g].collections++; /* for stats */
407 for (s = 0; s < generations[g].n_steps; s++) {
409 step = &generations[g].steps[s];
411 if (!(g == 0 && s == 0)) {
412 /* Tidy the end of the to-space chains */
413 step->hp_bd->free = step->hp;
414 step->hp_bd->link = NULL;
417 /* for generations we collected... */
420 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
422 /* free old memory and shift to-space into from-space for all
423 * the collected steps (except the allocation area). These
424 * freed blocks will probaby be quickly recycled.
426 if (!(g == 0 && s == 0)) {
427 freeChain(step->blocks);
428 step->blocks = step->to_space;
429 step->n_blocks = step->to_blocks;
430 step->to_space = NULL;
432 for (bd = step->blocks; bd != NULL; bd = bd->link) {
433 bd->evacuated = 0; /* now from-space */
437 /* LARGE OBJECTS. The current live large objects are chained on
438 * scavenged_large, having been moved during garbage
439 * collection from large_objects. Any objects left on
440 * large_objects list are therefore dead, so we free them here.
442 for (bd = step->large_objects; bd != NULL; bd = next) {
447 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
450 step->large_objects = step->scavenged_large_objects;
452 /* Set the maximum blocks for this generation, interpolating
453 * between the maximum size of the oldest and youngest
456 * max_blocks = alloc_area_size +
457 * (oldgen_max_blocks - alloc_area_size) * G
458 * -----------------------------------------
462 generations[g].max_blocks =
463 RtsFlags.GcFlags.minAllocAreaSize +
464 (((oldest_gen->max_blocks - RtsFlags.GcFlags.minAllocAreaSize) * g)
465 / (RtsFlags.GcFlags.generations-1));
468 /* for older generations... */
471 /* For older generations, we need to append the
472 * scavenged_large_object list (i.e. large objects that have been
473 * promoted during this GC) to the large_object list for that step.
475 for (bd = step->scavenged_large_objects; bd; bd = next) {
478 dbl_link_onto(bd, &step->large_objects);
481 /* add the new blocks we promoted during this GC */
482 step->n_blocks += step->to_blocks;
487 /* revert dead CAFs and update enteredCAFs list */
490 /* mark the garbage collected CAFs as dead */
492 if (major_gc) { gcCAFs(); }
495 /* zero the scavenged static object list */
497 zeroStaticObjectList(scavenged_static_objects);
502 for (bd = g0s0->blocks; bd; bd = bd->link) {
503 bd->free = bd->start;
504 ASSERT(bd->gen == g0);
505 ASSERT(bd->step == g0s0);
507 current_nursery = g0s0->blocks;
510 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
511 for (s = 0; s < generations[g].n_steps; s++) {
512 /* approximate amount of live data (doesn't take into account slop
513 * at end of each block). ToDo: this more accurately.
515 if (g == 0 && s == 0) { continue; }
516 step = &generations[g].steps[s];
517 live += step->n_blocks * BLOCK_SIZE_W +
518 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
522 /* Free the small objects allocated via allocate(), since this will
523 * all have been copied into G0S1 now.
525 if (small_alloc_list != NULL) {
526 freeChain(small_alloc_list);
528 small_alloc_list = NULL;
530 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
532 /* start any pending finalisers */
533 scheduleFinalisers(old_weak_ptr_list);
535 /* check sanity after GC */
537 for (g = 0; g <= N; g++) {
538 for (s = 0; s < generations[g].n_steps; s++) {
539 if (g == 0 && s == 0) { continue; }
540 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
541 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
544 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
545 for (s = 0; s < generations[g].n_steps; s++) {
546 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks,
547 generations[g].steps[s].blocks->start));
548 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
551 IF_DEBUG(sanity, checkFreeListSanity());
554 IF_DEBUG(gc, stat_describe_gens());
557 /* symbol-table based profiling */
558 /* heapCensus(to_space); */ /* ToDo */
561 /* restore enclosing cost centre */
566 /* check for memory leaks if sanity checking is on */
567 IF_DEBUG(sanity, memInventory());
569 /* ok, GC over: tell the stats department what happened. */
570 stat_endGC(allocated, collected, live, N);
573 /* -----------------------------------------------------------------------------
576 traverse_weak_ptr_list is called possibly many times during garbage
577 collection. It returns a flag indicating whether it did any work
578 (i.e. called evacuate on any live pointers).
580 Invariant: traverse_weak_ptr_list is called when the heap is in an
581 idempotent state. That means that there are no pending
582 evacuate/scavenge operations. This invariant helps the weak
583 pointer code decide which weak pointers are dead - if there are no
584 new live weak pointers, then all the currently unreachable ones are
587 For generational GC: we just don't try to finalise weak pointers in
588 older generations than the one we're collecting. This could
589 probably be optimised by keeping per-generation lists of weak
590 pointers, but for a few weak pointers this scheme will work.
591 -------------------------------------------------------------------------- */
594 traverse_weak_ptr_list(void)
596 StgWeak *w, **last_w, *next_w;
598 const StgInfoTable *info;
599 rtsBool flag = rtsFalse;
601 if (weak_done) { return rtsFalse; }
603 /* doesn't matter where we evacuate values/finalisers to, since
604 * these pointers are treated as roots (iff the keys are alive).
608 last_w = &old_weak_ptr_list;
609 for (w = old_weak_ptr_list; w; w = next_w) {
612 /* ignore weak pointers in older generations */
613 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
614 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
615 /* remove this weak ptr from the old_weak_ptr list */
617 /* and put it on the new weak ptr list */
619 w->link = weak_ptr_list;
625 info = get_itbl(target);
626 switch (info->type) {
631 case IND_OLDGEN: /* rely on compatible layout with StgInd */
632 case IND_OLDGEN_PERM:
633 /* follow indirections */
634 target = ((StgInd *)target)->indirectee;
638 /* If key is alive, evacuate value and finaliser and
639 * place weak ptr on new weak ptr list.
641 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
642 w->key = ((StgEvacuated *)target)->evacuee;
643 w->value = evacuate(w->value);
644 w->finaliser = evacuate(w->finaliser);
646 /* remove this weak ptr from the old_weak_ptr list */
649 /* and put it on the new weak ptr list */
651 w->link = weak_ptr_list;
656 default: /* key is dead */
663 /* If we didn't make any changes, then we can go round and kill all
664 * the dead weak pointers. The old_weak_ptr list is used as a list
665 * of pending finalisers later on.
667 if (flag == rtsFalse) {
668 for (w = old_weak_ptr_list; w; w = w->link) {
669 w->value = evacuate(w->value);
670 w->finaliser = evacuate(w->finaliser);
679 MarkRoot(StgClosure *root)
681 root = evacuate(root);
685 static inline void addBlock(step *step)
687 bdescr *bd = allocBlock();
691 if (step->gen->no <= N) {
697 step->hp_bd->free = step->hp;
698 step->hp_bd->link = bd;
699 step->hp = bd->start;
700 step->hpLim = step->hp + BLOCK_SIZE_W;
705 static __inline__ StgClosure *
706 copy(StgClosure *src, nat size, bdescr *bd)
711 /* Find out where we're going, using the handy "to" pointer in
712 * the step of the source object. If it turns out we need to
713 * evacuate to an older generation, adjust it here (see comment
717 if (step->gen->no < evac_gen) {
718 step = &generations[evac_gen].steps[0];
721 /* chain a new block onto the to-space for the destination step if
724 if (step->hp + size >= step->hpLim) {
730 for(to = dest, from = (P_)src; size>0; --size) {
733 return (StgClosure *)dest;
736 /* Special version of copy() for when we only want to copy the info
737 * pointer of an object, but reserve some padding after it. This is
738 * used to optimise evacuation of BLACKHOLEs.
741 static __inline__ StgClosure *
742 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, bdescr *bd)
748 if (step->gen->no < evac_gen) {
749 step = &generations[evac_gen].steps[0];
752 if (step->hp + size_to_reserve >= step->hpLim) {
757 step->hp += size_to_reserve;
758 for(to = dest, from = (P_)src; size_to_copy>0; --size_to_copy) {
762 return (StgClosure *)dest;
765 static __inline__ void
766 upd_evacuee(StgClosure *p, StgClosure *dest)
768 StgEvacuated *q = (StgEvacuated *)p;
770 SET_INFO(q,&EVACUATED_info);
774 /* -----------------------------------------------------------------------------
775 Evacuate a mutable object
777 If we evacuate a mutable object to an old generation, cons the
778 object onto the older generation's mutable list.
779 -------------------------------------------------------------------------- */
782 evacuate_mutable(StgMutClosure *c)
787 if (bd->gen->no > 0) {
788 c->mut_link = bd->gen->mut_list;
789 bd->gen->mut_list = c;
793 /* -----------------------------------------------------------------------------
794 Evacuate a large object
796 This just consists of removing the object from the (doubly-linked)
797 large_alloc_list, and linking it on to the (singly-linked)
798 new_large_objects list, from where it will be scavenged later.
800 Convention: bd->evacuated is /= 0 for a large object that has been
801 evacuated, or 0 otherwise.
802 -------------------------------------------------------------------------- */
805 evacuate_large(StgPtr p, rtsBool mutable)
807 bdescr *bd = Bdescr(p);
810 /* should point to the beginning of the block */
811 ASSERT(((W_)p & BLOCK_MASK) == 0);
813 /* already evacuated? */
815 /* Don't forget to set the failed_to_evac flag if we didn't get
816 * the desired destination (see comments in evacuate()).
818 if (bd->gen->no < evac_gen) {
819 failed_to_evac = rtsTrue;
825 /* remove from large_object list */
827 bd->back->link = bd->link;
828 } else { /* first object in the list */
829 step->large_objects = bd->link;
832 bd->link->back = bd->back;
835 /* link it on to the evacuated large object list of the destination step
838 if (step->gen->no < evac_gen) {
839 step = &generations[evac_gen].steps[0];
844 bd->link = step->new_large_objects;
845 step->new_large_objects = bd;
849 evacuate_mutable((StgMutClosure *)p);
853 /* -----------------------------------------------------------------------------
854 Adding a MUT_CONS to an older generation.
856 This is necessary from time to time when we end up with an
857 old-to-new generation pointer in a non-mutable object. We defer
858 the promotion until the next GC.
859 -------------------------------------------------------------------------- */
862 mkMutCons(StgClosure *ptr, generation *gen)
867 step = &gen->steps[0];
869 /* chain a new block onto the to-space for the destination step if
872 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
876 q = (StgMutVar *)step->hp;
877 step->hp += sizeofW(StgMutVar);
879 SET_HDR(q,&MUT_CONS_info,CCS_GC);
881 evacuate_mutable((StgMutClosure *)q);
883 return (StgClosure *)q;
886 /* -----------------------------------------------------------------------------
889 This is called (eventually) for every live object in the system.
891 The caller to evacuate specifies a desired generation in the
892 evac_gen global variable. The following conditions apply to
893 evacuating an object which resides in generation M when we're
894 collecting up to generation N
898 else evac to step->to
900 if M < evac_gen evac to evac_gen, step 0
902 if the object is already evacuated, then we check which generation
905 if M >= evac_gen do nothing
906 if M < evac_gen set failed_to_evac flag to indicate that we
907 didn't manage to evacuate this object into evac_gen.
909 -------------------------------------------------------------------------- */
913 evacuate(StgClosure *q)
917 const StgInfoTable *info;
920 if (!LOOKS_LIKE_STATIC(q)) {
922 if (bd->gen->no > N) {
923 /* Can't evacuate this object, because it's in a generation
924 * older than the ones we're collecting. Let's hope that it's
925 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
927 if (bd->gen->no < evac_gen) {
929 failed_to_evac = rtsTrue;
935 /* make sure the info pointer is into text space */
936 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
937 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
940 switch (info -> type) {
943 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
949 to = copy(q,sizeW_fromITBL(info),bd);
951 evacuate_mutable((StgMutClosure *)to);
958 case IND_OLDGEN_PERM:
963 to = copy(q,sizeW_fromITBL(info),bd);
969 to = copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),bd);
974 to = copy(q,BLACKHOLE_sizeW(),bd);
976 evacuate_mutable((StgMutClosure *)to);
981 const StgInfoTable* selectee_info;
982 StgClosure* selectee = ((StgSelector*)q)->selectee;
985 selectee_info = get_itbl(selectee);
986 switch (selectee_info->type) {
990 StgNat32 offset = info->layout.selector_offset;
992 /* check that the size is in range */
994 (StgNat32)(selectee_info->layout.payload.ptrs +
995 selectee_info->layout.payload.nptrs));
997 /* perform the selection! */
998 q = selectee->payload[offset];
1000 /* if we're already in to-space, there's no need to continue
1001 * with the evacuation, just update the source address with
1002 * a pointer to the (evacuated) constructor field.
1004 if (IS_USER_PTR(q)) {
1005 bdescr *bd = Bdescr((P_)q);
1006 if (bd->evacuated) {
1007 if (bd->gen->no < evac_gen) {
1008 failed_to_evac = rtsTrue;
1014 /* otherwise, carry on and evacuate this constructor field,
1015 * (but not the constructor itself)
1024 case IND_OLDGEN_PERM:
1025 selectee = stgCast(StgInd *,selectee)->indirectee;
1029 selectee = stgCast(StgCAF *,selectee)->value;
1033 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
1038 case THUNK_SELECTOR:
1039 /* aargh - do recursively???? */
1044 /* not evaluated yet */
1048 barf("evacuate: THUNK_SELECTOR: strange selectee");
1051 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1057 /* follow chains of indirections, don't evacuate them */
1058 q = ((StgInd*)q)->indirectee;
1061 /* ToDo: optimise STATIC_LINK for known cases.
1062 - FUN_STATIC : payload[0]
1063 - THUNK_STATIC : payload[1]
1064 - IND_STATIC : payload[1]
1068 if (info->srt_len == 0) { /* small optimisation */
1074 /* don't want to evacuate these, but we do want to follow pointers
1075 * from SRTs - see scavenge_static.
1078 /* put the object on the static list, if necessary.
1080 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1081 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1082 static_objects = (StgClosure *)q;
1086 case CONSTR_INTLIKE:
1087 case CONSTR_CHARLIKE:
1088 case CONSTR_NOCAF_STATIC:
1089 /* no need to put these on the static linked list, they don't need
1104 /* shouldn't see these */
1105 barf("evacuate: stack frame\n");
1109 /* these are special - the payload is a copy of a chunk of stack,
1111 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1116 /* Already evacuated, just return the forwarding address.
1117 * HOWEVER: if the requested destination generation (evac_gen) is
1118 * older than the actual generation (because the object was
1119 * already evacuated to a younger generation) then we have to
1120 * set the failed_to_evac flag to indicate that we couldn't
1121 * manage to promote the object to the desired generation.
1123 if (evac_gen > 0) { /* optimisation */
1124 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1125 if (Bdescr((P_)p)->gen->no < evac_gen) {
1126 /* fprintf(stderr,"evac failed!\n");*/
1127 failed_to_evac = rtsTrue;
1130 return ((StgEvacuated*)q)->evacuee;
1135 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1137 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1138 evacuate_large((P_)q, rtsFalse);
1141 /* just copy the block */
1142 to = copy(q,size,bd);
1149 case MUT_ARR_PTRS_FROZEN:
1151 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1153 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1154 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1157 /* just copy the block */
1158 to = copy(q,size,bd);
1160 if (info->type == MUT_ARR_PTRS) {
1161 evacuate_mutable((StgMutClosure *)to);
1169 StgTSO *tso = stgCast(StgTSO *,q);
1170 nat size = tso_sizeW(tso);
1173 /* Large TSOs don't get moved, so no relocation is required.
1175 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1176 evacuate_large((P_)q, rtsTrue);
1179 /* To evacuate a small TSO, we need to relocate the update frame
1183 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1185 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1187 /* relocate the stack pointers... */
1188 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1189 new_tso->sp = (StgPtr)new_tso->sp + diff;
1190 new_tso->splim = (StgPtr)new_tso->splim + diff;
1192 relocate_TSO(tso, new_tso);
1193 upd_evacuee(q,(StgClosure *)new_tso);
1195 evacuate_mutable((StgMutClosure *)new_tso);
1196 return (StgClosure *)new_tso;
1202 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1206 barf("evacuate: strange closure type");
1212 /* -----------------------------------------------------------------------------
1213 relocate_TSO is called just after a TSO has been copied from src to
1214 dest. It adjusts the update frame list for the new location.
1215 -------------------------------------------------------------------------- */
1218 relocate_TSO(StgTSO *src, StgTSO *dest)
1225 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1229 while ((P_)su < dest->stack + dest->stack_size) {
1230 switch (get_itbl(su)->type) {
1232 /* GCC actually manages to common up these three cases! */
1235 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1240 cf = (StgCatchFrame *)su;
1241 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1246 sf = (StgSeqFrame *)su;
1247 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1256 barf("relocate_TSO");
1265 scavenge_srt(const StgInfoTable *info)
1267 StgClosure **srt, **srt_end;
1269 /* evacuate the SRT. If srt_len is zero, then there isn't an
1270 * srt field in the info table. That's ok, because we'll
1271 * never dereference it.
1273 srt = stgCast(StgClosure **,info->srt);
1274 srt_end = srt + info->srt_len;
1275 for (; srt < srt_end; srt++) {
1280 /* -----------------------------------------------------------------------------
1281 Scavenge a given step until there are no more objects in this step
1284 evac_gen is set by the caller to be either zero (for a step in a
1285 generation < N) or G where G is the generation of the step being
1288 We sometimes temporarily change evac_gen back to zero if we're
1289 scavenging a mutable object where early promotion isn't such a good
1291 -------------------------------------------------------------------------- */
/* Scavenge the to-space of one step, breadth-first.
 *
 * Walks objects from the scan pointer up to the step's allocation
 * pointer (hp), dispatching on closure type and evacuating every
 * pointer field.  evac_gen is temporarily set to 0 for closures that
 * are repeatedly mutable (MVARs, MUT_ARR_PTRS, TSO stacks) and
 * restored from saved_evac_gen afterwards.  failed_to_evac records
 * that some referent could not be promoted far enough; when set after
 * an object, the object is handed to mkMutCons so the old-to-new
 * pointer is remembered.
 *
 * NOTE(review): this excerpt is non-contiguous (many original lines
 * are elided), so the comments below describe only the visible code.
 */
1295 scavenge(step *step)
1298 const StgInfoTable *info;
1300 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1305 failed_to_evac = rtsFalse;
1307 /* scavenge phase - standard breadth-first scavenging of the
1311 while (bd != step->hp_bd || p < step->hp) {
1313 /* If we're at the end of this block, move on to the next block */
1314 if (bd != step->hp_bd && p == bd->free) {
1320 q = p; /* save ptr to object */
1322 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1323 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1325 info = get_itbl((StgClosure *)p);
1326 switch (info -> type) {
/* BCO: evacuate each constant pointer in the bytecode object. */
1330 StgBCO* bco = stgCast(StgBCO*,p);
1332 for (i = 0; i < bco->n_ptrs; i++) {
1333 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1335 p += bco_sizeW(bco);
1340 /* treat MVars specially, because we don't want to evacuate the
1341 * mut_link field in the middle of the closure.
1344 StgMVar *mvar = ((StgMVar *)p);
1346 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1347 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1348 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1349 p += sizeofW(StgMVar);
1350 evac_gen = saved_evac_gen;
1363 case IND_OLDGEN_PERM:
/* Generic pointers-first layout: evacuate the pointer section of
 * the payload, then skip over the non-pointer words. */
1369 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1370 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1371 (StgClosure *)*p = evacuate((StgClosure *)*p);
1373 p += info->layout.payload.nptrs;
1378 /* ignore MUT_CONSs */
1379 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1381 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1382 evac_gen = saved_evac_gen;
1384 p += sizeofW(StgMutVar);
1389 p += BLACKHOLE_sizeW();
/* BLACKHOLE_BQ: evacuate the blocked-thread queue; if that failed
 * to promote, put the blackhole itself on a mutable list. */
1394 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1395 (StgClosure *)bh->blocking_queue =
1396 evacuate((StgClosure *)bh->blocking_queue);
1397 if (failed_to_evac) {
1398 failed_to_evac = rtsFalse;
1399 evacuate_mutable((StgMutClosure *)bh);
1401 p += BLACKHOLE_sizeW();
1405 case THUNK_SELECTOR:
1407 StgSelector *s = (StgSelector *)p;
1408 s->selectee = evacuate(s->selectee);
1409 p += THUNK_SELECTOR_sizeW();
1415 barf("scavenge:IND???\n");
1417 case CONSTR_INTLIKE:
1418 case CONSTR_CHARLIKE:
1420 case CONSTR_NOCAF_STATIC:
1424 /* Shouldn't see a static object here. */
1425 barf("scavenge: STATIC object\n");
1437 /* Shouldn't see stack frames here. */
1438 barf("scavenge: stack frame\n");
1440 case AP_UPD: /* same as PAPs */
1442 /* Treat a PAP just like a section of stack, not forgetting to
1443 * evacuate the function pointer too...
1446 StgPAP* pap = stgCast(StgPAP*,p);
1448 pap->fun = evacuate(pap->fun);
1449 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1450 p += pap_sizeW(pap);
1456 /* nothing to follow */
1457 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1461 /* follow everything */
1465 evac_gen = 0; /* repeatedly mutable */
1466 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1467 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1468 (StgClosure *)*p = evacuate((StgClosure *)*p);
1470 evac_gen = saved_evac_gen;
1474 case MUT_ARR_PTRS_FROZEN:
1475 /* follow everything */
1477 StgPtr start = p, next;
1479 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1480 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1481 (StgClosure *)*p = evacuate((StgClosure *)*p);
1483 if (failed_to_evac) {
1484 /* we can do this easier... */
1485 evacuate_mutable((StgMutClosure *)start);
1486 failed_to_evac = rtsFalse;
1497 /* chase the link field for any TSOs on the same queue */
1498 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1499 /* scavenge this thread's stack */
1500 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1501 evac_gen = saved_evac_gen;
1502 p += tso_sizeW(tso);
1509 barf("scavenge: unimplemented/strange closure type\n");
1515 /* If we didn't manage to promote all the objects pointed to by
1516 * the current object, then we have to designate this object as
1517 * mutable (because it contains old-to-new generation pointers).
1519 if (failed_to_evac) {
1520 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1521 failed_to_evac = rtsFalse;
1529 /* -----------------------------------------------------------------------------
1530 Scavenge one object.
1532 This is used for objects that are temporarily marked as mutable
1533 because they contain old-to-new generation pointers. Only certain
1534 objects can have this property.
1535 -------------------------------------------------------------------------- */
/* Scavenge a single object (used for objects reached via a MUT_CONS
 * on a mutable list).  Only the closure types that can legitimately
 * carry old-to-new pointers are handled.  Returns (via no_luck /
 * failed_to_evac) whether some referent could not be promoted --
 * the caller uses this to decide whether to keep the MUT_CONS.
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
1537 scavenge_one(StgPtr p)
1542 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1543 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1545 info = get_itbl((StgClosure *)p);
1547 switch (info -> type) {
1555 case IND_OLDGEN_PERM:
/* pointers-first layout: evacuate each pointer in the payload */
1561 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1562 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1563 (StgClosure *)*p = evacuate((StgClosure *)*p);
1572 case THUNK_SELECTOR:
1574 StgSelector *s = (StgSelector *)p;
1575 s->selectee = evacuate(s->selectee);
1579 case AP_UPD: /* same as PAPs */
1581 /* Treat a PAP just like a section of stack, not forgetting to
1582 * evacuate the function pointer too...
1585 StgPAP* pap = stgCast(StgPAP*,p);
1587 pap->fun = evacuate(pap->fun);
1588 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1593 /* This might happen if for instance a MUT_CONS was pointing to a
1594 * THUNK which has since been updated. The IND_OLDGEN will
1595 * be on the mutable list anyway, so we don't need to do anything
1601 barf("scavenge_one: strange object");
/* capture, then reset, the failure flag for the caller */
1604 no_luck = failed_to_evac;
1605 failed_to_evac = rtsFalse;
1610 /* -----------------------------------------------------------------------------
1611 Scavenging mutable lists.
1613 We treat the mutable list of each generation > N (i.e. all the
1614 generations older than the one being collected) as roots. We also
1615 remove non-mutable objects from the mutable list at this point.
1616 -------------------------------------------------------------------------- */
/* Scavenge the mutable list of a generation older than the ones being
 * collected.  Each entry's pointer fields are evacuated; entries that
 * can be proven to no longer contain old-to-new pointers (frozen
 * arrays, promotable MUT_CONSes, satisfied IND_OLDGENs) are unlinked
 * from the list, everything else stays (prev is advanced past it).
 *
 * NOTE(review): excerpt is non-contiguous; some lines (including the
 * list-head setup and parts of several cases) are elided.
 */
1618 static StgMutClosure *
1619 scavenge_mutable_list(StgMutClosure *p, nat gen)
1622 StgMutClosure *start;
1623 StgMutClosure **prev;
1630 failed_to_evac = rtsFalse;
1632 for (; p != END_MUT_LIST; p = *prev) {
1634 /* make sure the info pointer is into text space */
1635 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1636 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1639 switch(info->type) {
1641 case MUT_ARR_PTRS_FROZEN:
1642 /* remove this guy from the mutable list, but follow the ptrs
1643 * anyway (and make sure they get promoted to this gen).
1648 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1650 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1651 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* keep on the list only if some referent failed to promote */
1655 if (failed_to_evac) {
1656 failed_to_evac = rtsFalse;
1657 prev = &p->mut_link;
1659 *prev = p->mut_link;
1665 /* follow everything */
1666 prev = &p->mut_link;
1670 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1671 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1672 (StgClosure *)*q = evacuate((StgClosure *)*q);
1678 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1679 * it from the mutable list if possible by promoting whatever it
1682 if (p->header.info == &MUT_CONS_info) {
1684 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1685 /* didn't manage to promote everything, so leave the
1686 * MUT_CONS on the list.
1688 prev = &p->mut_link;
1690 *prev = p->mut_link;
/* genuine MUT_VAR: always stays on the mutable list */
1694 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1695 prev = &p->mut_link;
1701 StgMVar *mvar = (StgMVar *)p;
1702 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1703 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1704 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1705 prev = &p->mut_link;
1710 /* follow ptrs and remove this from the mutable list */
1712 StgTSO *tso = (StgTSO *)p;
1714 /* Don't bother scavenging if this thread is dead
1716 if (!(tso->whatNext == ThreadComplete ||
1717 tso->whatNext == ThreadKilled)) {
1718 /* Don't need to chase the link field for any TSOs on the
1719 * same queue. Just scavenge this thread's stack
1721 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1724 /* Don't take this TSO off the mutable list - it might still
1725 * point to some younger objects (because we set evac_gen to 0
1728 prev = &tso->mut_link;
1733 case IND_OLDGEN_PERM:
1735 /* Try to pull the indirectee into this generation, so we can
1736 * remove the indirection from the mutable list.
1739 ((StgIndOldGen *)p)->indirectee =
1740 evacuate(((StgIndOldGen *)p)->indirectee);
1743 if (failed_to_evac) {
1744 failed_to_evac = rtsFalse;
1745 prev = &p->mut_link;
1747 *prev = p->mut_link;
1748 /* the mut_link field of an IND_STATIC is overloaded as the
1749 * static link field too (it just so happens that we don't need
1750 * both at the same time), so we need to NULL it out when
1751 * removing this object from the mutable list because the static
1752 * link fields are all assumed to be NULL before doing a major
1761 StgBlockingQueue *bh = (StgBlockingQueue *)p;
1762 (StgClosure *)bh->blocking_queue =
1763 evacuate((StgClosure *)bh->blocking_queue);
1764 prev = &p->mut_link;
1769 /* shouldn't have anything else on the mutables list */
1770 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the live static-object list (see STATIC OBJECT LIST notes
 * at the top of the file).  Objects are moved one at a time from
 * static_objects to scavenged_static_objects *before* being
 * scavenged, because scavenging may cons new entries onto
 * static_objects.  Everything is evacuated to the oldest generation.
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
1777 scavenge_static(void)
1779 StgClosure* p = static_objects;
1780 const StgInfoTable *info;
1782 /* Always evacuate straight to the oldest generation for static
1784 evac_gen = oldest_gen->no;
1786 /* keep going until we've scavenged all the objects on the linked
1788 while (p != END_OF_STATIC_LIST) {
1792 /* make sure the info pointer is into text space */
1793 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1794 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1796 /* Take this object *off* the static_objects list,
1797 * and put it on the scavenged_static_objects list.
1799 static_objects = STATIC_LINK(info,p);
1800 STATIC_LINK(info,p) = scavenged_static_objects;
1801 scavenged_static_objects = p;
1803 switch (info -> type) {
1807 StgInd *ind = (StgInd *)p;
1808 ind->indirectee = evacuate(ind->indirectee);
1810 /* might fail to evacuate it, in which case we have to pop it
1811 * back on the mutable list (and take it off the
1812 * scavenged_static list because the static link and mut link
1813 * pointers are one and the same).
1815 if (failed_to_evac) {
1816 failed_to_evac = rtsFalse;
1817 scavenged_static_objects = STATIC_LINK(info,p);
1818 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1819 oldest_gen->mut_list = (StgMutClosure *)ind;
/* constructor-style static object: evacuate the pointer section */
1833 next = (P_)p->payload + info->layout.payload.ptrs;
1834 /* evacuate the pointers */
1835 for (q = (P_)p->payload; q < next; q++) {
1836 (StgClosure *)*q = evacuate((StgClosure *)*q);
1842 barf("scavenge_static");
1845 ASSERT(failed_to_evac == rtsFalse);
1847 /* get the next static object from the list. Remember, there might
1848 * be more stuff on this list now that we've done some evacuating!
1849 * (static_objects is a global)
1855 /* -----------------------------------------------------------------------------
1856 scavenge_stack walks over a section of stack and evacuates all the
1857 objects pointed to by it. We can use the same code for walking
1858 PAPs, since these are just sections of copied stack.
1859 -------------------------------------------------------------------------- */
/* Walk a section of stack [p, stack_end), evacuating every closure
 * pointer found.  The stack alternates between pending-argument
 * sections (tagged words / raw closure pointers) and activation
 * records whose layout is described by a bitmap in the info table.
 * Also used for PAP payloads, which are copied stack.
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
1862 scavenge_stack(StgPtr p, StgPtr stack_end)
1865 const StgInfoTable* info;
1869 * Each time around this loop, we are looking at a chunk of stack
1870 * that starts with either a pending argument section or an
1871 * activation record.
1874 while (p < stack_end) {
1875 q = *stgCast(StgPtr*,p);
1877 /* If we've got a tag, skip over that many words on the stack */
1878 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1883 /* Is q a pointer to a closure?
1885 if (! LOOKS_LIKE_GHC_INFO(q)) {
1888 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1889 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1891 /* otherwise, must be a pointer into the allocation space.
1895 (StgClosure *)*p = evacuate((StgClosure *)q);
1901 * Otherwise, q must be the info pointer of an activation
1902 * record. All activation records have 'bitmap' style layout
1905 info = get_itbl(stgCast(StgClosure*,p));
1907 switch (info->type) {
1909 /* Dynamic bitmap: the mask is stored on the stack */
1911 bitmap = stgCast(StgRetDyn*,p)->liveness;
1912 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1915 /* probably a slow-entry point return address: */
1921 /* Specialised code for update frames, since they're so common.
1922 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
1923 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
1927 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1929 StgClosureType type = get_itbl(frame->updatee)->type;
1931 p += sizeofW(StgUpdateFrame);
1932 if (type == EVACUATED) {
1933 frame->updatee = evacuate(frame->updatee);
/* not yet evacuated: copy it ourselves, by-hand version of
 * evacuate() for the known blackhole cases */
1936 bdescr *bd = Bdescr((P_)frame->updatee);
1937 if (bd->gen->no > N) {
1938 if (bd->gen->no < evac_gen) {
1939 failed_to_evac = rtsTrue;
1946 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
1947 sizeofW(StgHeader), bd);
1948 upd_evacuee(frame->updatee,to);
1949 frame->updatee = to;
1952 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
1953 upd_evacuee(frame->updatee,to);
1954 frame->updatee = to;
/* BLACKHOLE_BQ is mutable (has a thread queue) */
1955 evacuate_mutable((StgMutClosure *)to);
1958 barf("scavenge_stack: UPDATE_FRAME updatee");
1963 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
1970 bitmap = info->layout.bitmap;
/* a 0 bit means "live pointer": evacuate that slot */
1973 while (bitmap != 0) {
1974 if ((bitmap & 1) == 0) {
1975 (StgClosure *)*p = evacuate((StgClosure *)*p);
1978 bitmap = bitmap >> 1;
1985 /* large bitmap (> 32 entries) */
1990 StgLargeBitmap *large_bitmap;
1993 large_bitmap = info->layout.large_bitmap;
1996 for (i=0; i<large_bitmap->size; i++) {
1997 bitmap = large_bitmap->bitmap[i];
1998 q = p + sizeof(W_) * 8;
1999 while (bitmap != 0) {
2000 if ((bitmap & 1) == 0) {
2001 (StgClosure *)*p = evacuate((StgClosure *)*p);
2004 bitmap = bitmap >> 1;
2006 if (i+1 < large_bitmap->size) {
2008 (StgClosure *)*p = evacuate((StgClosure *)*p);
2014 /* and don't forget to follow the SRT */
2019 barf("scavenge_stack: weird activation record found on stack.\n");
2024 /*-----------------------------------------------------------------------------
2025 scavenge the large object list.
2027 evac_gen set by caller; similar games played with evac_gen as with
2028 scavenge() - see comment at the top of scavenge(). Most large
2029 objects are (repeatedly) mutable, so most of the time evac_gen will
2031 --------------------------------------------------------------------------- */
/* Scavenge the large-object list of a step.  new_large_objects is
 * treated as a stack (evacuation pushes on the front), so objects are
 * popped one at a time onto scavenged_large_objects and then
 * scavenged.  evac_gen defaults to 0 because most large objects are
 * mutable; it is restored for the immutable cases (frozen arrays,
 * BCOs).
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
2034 scavenge_large(step *step)
2038 const StgInfoTable* info;
2039 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2041 evac_gen = 0; /* most objects are mutable */
2042 bd = step->new_large_objects;
2044 for (; bd != NULL; bd = step->new_large_objects) {
2046 /* take this object *off* the large objects list and put it on
2047 * the scavenged large objects list. This is so that we can
2048 * treat new_large_objects as a stack and push new objects on
2049 * the front when evacuating.
2051 step->new_large_objects = bd->link;
2052 dbl_link_onto(bd, &step->scavenged_large_objects);
2055 info = get_itbl(stgCast(StgClosure*,p));
2057 switch (info->type) {
2059 /* only certain objects can be "large"... */
2063 /* nothing to follow */
2067 /* follow everything */
2071 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2072 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2073 (StgClosure *)*p = evacuate((StgClosure *)*p);
2078 case MUT_ARR_PTRS_FROZEN:
2079 /* follow everything */
2081 StgPtr start = p, next;
2083 evac_gen = saved_evac_gen; /* not really mutable */
2084 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2085 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2086 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* frozen array holding an old-to-new pointer: remember it */
2089 if (failed_to_evac) {
2090 evacuate_mutable((StgMutClosure *)start);
2097 StgBCO* bco = stgCast(StgBCO*,p);
2099 evac_gen = saved_evac_gen;
2100 for (i = 0; i < bco->n_ptrs; i++) {
2101 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2112 /* chase the link field for any TSOs on the same queue */
2113 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2114 /* scavenge this thread's stack */
2115 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2120 barf("scavenge_large: unknown/strange object");
/* Reset the static link field of every object on a static-object
 * list to NULL, so the list can be rebuilt by the next collection
 * (a NULL link means "not on the list" -- see notes at top of file).
 */
2126 zeroStaticObjectList(StgClosure* first_static)
2130 const StgInfoTable *info;
2132 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2134 link = STATIC_LINK(info, p);
2135 STATIC_LINK(info,p) = NULL;
2139 /* This function is only needed because we share the mutable link
2140 * field with the static link field in an IND_STATIC, so we have to
2141 * zero the mut_link field before doing a major GC, which needs the
2142 * static link field.
2144 * It doesn't do any harm to zero all the mutable link fields on the
/* Walk a mutable list and clear each mut_link field (see the comment
 * above: needed because IND_STATIC shares mut_link with its static
 * link field, which must be NULL before a major GC).
 * NOTE(review): the loop body that saves c->mut_link into `next` and
 * zeroes it is elided in this excerpt.
 */
2148 zeroMutableList(StgMutClosure *first)
2150 StgMutClosure *next, *c;
2152 for (c = first; c != END_MUT_LIST; c = next) {
2158 /* -----------------------------------------------------------------------------
2160 -------------------------------------------------------------------------- */
/* Revert every entered CAF back to the unentered state, draining the
 * enteredCAFs list.  The value and link fields are poisoned with
 * 0xdeadbeef so that accidental reuse is caught quickly.
 */
2162 void RevertCAFs(void)
2164 while (enteredCAFs != END_CAF_LIST) {
2165 StgCAF* caf = enteredCAFs;
2167 enteredCAFs = caf->link;
2168 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2169 SET_INFO(caf,&CAF_UNENTERED_info);
2170 caf->value = stgCast(StgClosure*,0xdeadbeef);
2171 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* After a GC, rebuild the enteredCAFs list: CAFs that were evacuated
 * are still live, so their new copies are kept on the list; dead
 * (non-evacuated) CAFs are reverted to CAF_UNENTERED and poisoned.
 * NOTE(review): excerpt is non-contiguous; the case labels for the
 * evacuated vs. dead branches are elided.
 */
2175 void revertDeadCAFs(void)
2177 StgCAF* caf = enteredCAFs;
2178 enteredCAFs = END_CAF_LIST;
2179 while (caf != END_CAF_LIST) {
2180 StgCAF* next = caf->link;
2182 switch(GET_INFO(caf)->type) {
2185 /* This object has been evacuated, it must be live. */
2186 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2187 new->link = enteredCAFs;
2193 SET_INFO(caf,&CAF_UNENTERED_info);
2194 caf->value = stgCast(StgClosure*,0xdeadbeef);
2195 caf->link = stgCast(StgCAF*,0xdeadbeef);
2199 barf("revertDeadCAFs: enteredCAFs list corrupted");
2205 /* -----------------------------------------------------------------------------
2206 Sanity code for CAF garbage collection.
2208 With DEBUG turned on, we manage a CAF list in addition to the SRT
2209 mechanism. After GC, we run down the CAF list and blackhole any
2210 CAFs which have been garbage collected. This means we get an error
2211 whenever the program tries to enter a garbage collected CAF.
2213 Any garbage collected CAFs are taken off the CAF list at the same
2215 -------------------------------------------------------------------------- */
/* NOTE(review): fragment of the DEBUG-only CAF sanity walk described
 * in the comment above (presumably gcCAFs -- the function signature
 * is elided in this excerpt; confirm against the full source).
 * Visible behavior: IND_STATIC CAFs whose static link is NULL were
 * not reached by this GC, so they are blackholed and unlinked;
 * live CAFs advance the pp cursor.
 */
2223 const StgInfoTable *info;
2234 ASSERT(info->type == IND_STATIC);
2236 if (STATIC_LINK(info,p) == NULL) {
2237 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
/* dead CAF: make entering it an immediate error */
2239 SET_INFO(p,&BLACKHOLE_info);
2240 p = STATIC_LINK2(info,p);
2244 pp = &STATIC_LINK2(info,p);
2251 /* fprintf(stderr, "%d CAFs live\n", i); */
2255 /* -----------------------------------------------------------------------------
2258 Whenever a thread returns to the scheduler after possibly doing
2259 some work, we have to run down the stack and black-hole all the
2260 closures referred to by update frames.
2261 -------------------------------------------------------------------------- */
/* Lazy black-holing: walk the chain of update frames on a thread's
 * stack and overwrite each updatee's info pointer with BLACKHOLE
 * (unless it is already one of the blackhole variants).  CATCH and
 * SEQ frames are simply skipped via their link fields.
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
2264 threadLazyBlackHole(StgTSO *tso)
2266 StgUpdateFrame *update_frame;
2267 StgBlockingQueue *bh;
2270 stack_end = &tso->stack[tso->stack_size];
2271 update_frame = tso->su;
2274 switch (get_itbl(update_frame)->type) {
2277 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2281 bh = (StgBlockingQueue *)update_frame->updatee;
2283 /* if the thunk is already blackholed, it means we've also
2284 * already blackholed the rest of the thunks on this stack,
2285 * so we can stop early.
2288 /* Don't for now: when we enter a CAF, we create a black hole on
2289 * the heap and make the update frame point to it. Thus the
2290 * above optimisation doesn't apply.
2292 if (bh->header.info != &BLACKHOLE_info
2293 && bh->header.info != &BLACKHOLE_BQ_info
2294 && bh->header.info != &CAF_BLACKHOLE_info) {
2295 SET_INFO(bh,&BLACKHOLE_info);
2298 update_frame = update_frame->link;
2302 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2308 barf("threadPaused");
2313 /* -----------------------------------------------------------------------------
2316 * Code largely pinched from old RTS, then hacked to bits. We also do
2317 * lazy black holing here.
2319 * -------------------------------------------------------------------------- */
/* Stack squeezing: remove "empty" update frames (two adjacent update
 * frames with nothing between them) from a thread's stack, redirecting
 * the bypassed updatee to the kept one with UPD_IND.  Works in two
 * passes: first walk down the stack reversing the frame links, then
 * walk back up squeezing, lazily black-holing updatees, and sliding
 * the intervening stack words up by the accumulated displacement.
 *
 * NOTE(review): excerpt is non-contiguous; some lines are elided.
 */
2322 threadSqueezeStack(StgTSO *tso)
2324 lnat displacement = 0;
2325 StgUpdateFrame *frame;
2326 StgUpdateFrame *next_frame; /* Temporally next */
2327 StgUpdateFrame *prev_frame; /* Temporally previous */
2329 rtsBool prev_was_update_frame;
2331 bottom = &(tso->stack[tso->stack_size]);
2334 /* There must be at least one frame, namely the STOP_FRAME.
2336 ASSERT((P_)frame < bottom);
2338 /* Walk down the stack, reversing the links between frames so that
2339 * we can walk back up as we squeeze from the bottom. Note that
2340 * next_frame and prev_frame refer to next and previous as they were
2341 * added to the stack, rather than the way we see them in this
2342 * walk. (It makes the next loop less confusing.)
2344 * Could stop if we find an update frame pointing to a black hole,
2345 * but see comment in threadLazyBlackHole().
2349 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2350 prev_frame = frame->link;
2351 frame->link = next_frame;
2356 /* Now, we're at the bottom. Frame points to the lowest update
2357 * frame on the stack, and its link actually points to the frame
2358 * above. We have to walk back up the stack, squeezing out empty
2359 * update frames and turning the pointers back around on the way
2362 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2363 * we never want to eliminate it anyway. Just walk one step up
2364 * before starting to squeeze. When you get to the topmost frame,
2365 * remember that there are still some words above it that might have
2372 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2375 * Loop through all of the frames (everything except the very
2376 * bottom). Things are complicated by the fact that we have
2377 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2378 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2380 while (frame != NULL) {
2382 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2383 rtsBool is_update_frame;
2385 next_frame = frame->link;
2386 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2389 * 1. both the previous and current frame are update frames
2390 * 2. the current frame is empty
2392 if (prev_was_update_frame && is_update_frame &&
2393 (P_)prev_frame == frame_bottom + displacement) {
2395 /* Now squeeze out the current frame */
2396 StgClosure *updatee_keep = prev_frame->updatee;
2397 StgClosure *updatee_bypass = frame->updatee;
2400 fprintf(stderr, "squeezing frame at %p\n", frame);
2403 /* Deal with blocking queues. If both updatees have blocked
2404 * threads, then we should merge the queues into the update
2405 * frame that we're keeping.
2407 * Alternatively, we could just wake them up: they'll just go
2408 * straight to sleep on the proper blackhole! This is less code
2409 * and probably less bug prone, although it's probably much
2412 #if 0 /* do it properly... */
2413 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2414 /* Sigh. It has one. Don't lose those threads! */
2415 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2416 /* Urgh. Two queues. Merge them. */
2417 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
2419 while (keep_tso->link != END_TSO_QUEUE) {
2420 keep_tso = keep_tso->link;
2422 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
2425 /* For simplicity, just swap the BQ for the BH */
2426 P_ temp = updatee_keep;
2428 updatee_keep = updatee_bypass;
2429 updatee_bypass = temp;
2431 /* Record the swap in the kept frame (below) */
2432 prev_frame->updatee = updatee_keep;
2437 TICK_UPD_SQUEEZED();
2438 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2440 sp = (P_)frame - 1; /* sp = stuff to slide */
2441 displacement += sizeofW(StgUpdateFrame);
2444 /* No squeeze for this frame */
2445 sp = frame_bottom - 1; /* Keep the current frame */
2447 /* Do lazy black-holing.
2449 if (is_update_frame) {
2450 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
2451 if (bh->header.info != &BLACKHOLE_info
2452 && bh->header.info != &BLACKHOLE_BQ_info
2453 && bh->header.info != &CAF_BLACKHOLE_info
2455 SET_INFO(bh,&BLACKHOLE_info);
2459 /* Fix the link in the current frame (should point to the frame below) */
2460 frame->link = prev_frame;
2461 prev_was_update_frame = is_update_frame;
2464 /* Now slide all words from sp up to the next frame */
2466 if (displacement > 0) {
2467 P_ next_frame_bottom;
2469 if (next_frame != NULL)
2470 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2472 next_frame_bottom = tso->sp - 1;
2475 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* copy downward-growing stack words up by `displacement` slots */
2479 while (sp >= next_frame_bottom) {
2480 sp[displacement] = *sp;
2484 (P_)prev_frame = (P_)frame + displacement;
2488 tso->sp += displacement;
2489 tso->su = prev_frame;
2492 /* -----------------------------------------------------------------------------
2495 * We have to prepare for GC - this means doing lazy black holing
2496 * here. We also take the opportunity to do stack squeezing if it's
2498 * -------------------------------------------------------------------------- */
/* Called when a thread returns to the scheduler: prepare its stack
 * for possible GC.  Either squeeze the stack (which also black-holes)
 * when the squeezeUpdFrames flag is on, or just do lazy black-holing.
 */
2501 threadPaused(StgTSO *tso)
2503 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2504 threadSqueezeStack(tso); /* does black holing too */
2506 threadLazyBlackHole(tso);