1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.9 1999/01/15 17:57:08 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
/* GC-wide state shared by GarbageCollect(), evacuate() and the
 * scavenging functions below.  See the long comment above for the
 * static-object list invariants.
 */
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
85 static rtsBool failed_to_evac;
87 /* -----------------------------------------------------------------------------
88 Static function declarations
89 -------------------------------------------------------------------------- */
91 static StgClosure *evacuate(StgClosure *q);
92 static void zeroStaticObjectList(StgClosure* first_static);
93 static rtsBool traverse_weak_ptr_list(void);
94 static void zeroMutableList(StgMutClosure *first);
95 static void revertDeadCAFs(void);
97 static void scavenge_stack(StgPtr p, StgPtr stack_end);
98 static void scavenge_large(step *step);
99 static void scavenge(step *step);
100 static void scavenge_static(void);
101 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
104 static void gcCAFs(void);
107 /* -----------------------------------------------------------------------------
110 For garbage collecting generation N (and all younger generations):
112 - follow all pointers in the root set. the root set includes all
113 mutable objects in all steps in all generations.
115 - for each pointer, evacuate the object it points to into either
116 + to-space in the next higher step in that generation, if one exists,
117 + if the object's generation == N, then evacuate it to the next
118 generation if one exists, or else to-space in the current
120 + if the object's generation < N, then evacuate it to to-space
121 in the next generation.
123 - repeatedly scavenge to-space from each step in each generation
124 being collected until no more objects can be evacuated.
126 - free from-space in each step, and set from-space = to-space.
128 -------------------------------------------------------------------------- */
/* GarbageCollect: top-level entry point for a collection.
 *
 * get_roots: callback which marks (via MarkRoot) all roots the
 *            application knows about, in addition to the roots the
 *            RTS itself tracks (mutable lists, weak pointers, TSOs).
 *
 * Overall phases visible below: account for allocation since last GC,
 * decide which generation N to collect, set up to-space, follow roots,
 * scavenge to a fixpoint, process weak pointers, then tidy up each
 * generation/step, reset the nursery, and report statistics.
 */
130 void GarbageCollect(void (*get_roots)(void))
134 lnat live, allocated, collected = 0;
138 CostCentreStack *prev_CCS;
141 /* tell the stats department that we've started a GC */
144 /* attribute any costs to CCS_GC */
150 /* We might have been called from Haskell land by _ccall_GC, in
151 * which case we need to call threadPaused() because the scheduler
152 * won't have done it.
154 if (CurrentTSO) { threadPaused(CurrentTSO); }
156 /* Approximate how much we allocated: number of blocks in the
157 * nursery + blocks allocated via allocate() - unused nursery blocks.
158 * This leaves a little slop at the end of each block, and doesn't
159 * take into account large objects (ToDo).
161 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
162 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
163 allocated -= BLOCK_SIZE_W;
166 /* Figure out which generation to collect
168 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
169 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
173 major_gc = (N == RtsFlags.GcFlags.generations-1);
175 /* check stack sanity *before* GC (ToDo: check all threads) */
176 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
177 IF_DEBUG(sanity, checkFreeListSanity());
179 /* Initialise the static object lists
181 static_objects = END_OF_STATIC_LIST;
182 scavenged_static_objects = END_OF_STATIC_LIST;
184 /* zero the mutable list for the oldest generation (see comment by
185 * zeroMutableList below).
188 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
191 /* Initialise to-space in all the generations/steps that we're
194 for (g = 0; g <= N; g++) {
195 generations[g].mut_list = END_MUT_LIST;
197 for (s = 0; s < generations[g].n_steps; s++) {
198 /* generation 0, step 0 doesn't need to-space */
199 if (g == 0 && s == 0) { continue; }
200 /* Get a free block for to-space. Extra blocks will be chained on
204 step = &generations[g].steps[s];
205 ASSERT(step->gen->no == g);
206 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
207 bd->gen = &generations[g];
210 bd->evacuated = 1; /* it's a to-space block */
211 step->hp = bd->start;
212 step->hpLim = step->hp + BLOCK_SIZE_W;
215 step->to_blocks = 1; /* ???? */
216 step->scan = bd->start;
218 step->new_large_objects = NULL;
219 step->scavenged_large_objects = NULL;
220 /* mark the large objects as not evacuated yet */
221 for (bd = step->large_objects; bd; bd = bd->link) {
227 /* make sure the older generations have at least one block to
228 * allocate into (this makes things easier for copy(), see below.)
230 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
231 for (s = 0; s < generations[g].n_steps; s++) {
232 step = &generations[g].steps[s];
233 if (step->hp_bd == NULL) {
235 bd->gen = &generations[g];
238 bd->evacuated = 0; /* *not* a to-space block */
239 step->hp = bd->start;
240 step->hpLim = step->hp + BLOCK_SIZE_W;
245 /* Set the scan pointer for older generations: remember we
246 * still have to scavenge objects that have been promoted. */
247 step->scan = step->hp;
248 step->scan_bd = step->hp_bd;
249 step->to_space = NULL;
251 step->new_large_objects = NULL;
252 step->scavenged_large_objects = NULL;
254 /* retain these so we can sanity-check later on */
255 step->old_scan = step->scan;
256 step->old_scan_bd = step->scan_bd;
261 /* -----------------------------------------------------------------------
262 * follow all the roots that the application knows about.
267 /* follow all the roots that we know about:
268 * - mutable lists from each generation > N
269 * we want to *scavenge* these roots, not evacuate them: they're not
270 * going to move in this GC.
271 * Also: do them in reverse generation order. This is because we
272 * often want to promote objects that are pointed to by older
273 * generations early, so we don't have to repeatedly copy them.
274 * Doing the generations in reverse order ensures that we don't end
275 * up in the situation where we want to evac an object to gen 3 and
276 * it has already been evaced to gen 2.
279 StgMutClosure *tmp, **pp;
280 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
281 /* the act of scavenging the mutable list for this generation
282 * might place more objects on the mutable list itself. So we
283 * place the current mutable list in a temporary, scavenge it,
284 * and then append it to the new list.
286 tmp = generations[g].mut_list;
287 generations[g].mut_list = END_MUT_LIST;
288 tmp = scavenge_mutable_list(tmp, g);
290 pp = &generations[g].mut_list;
291 while (*pp != END_MUT_LIST) {
292 pp = &(*pp)->mut_link;
297 /* And don't forget to mark the TSO if we got here direct from
300 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
303 /* Mark the weak pointer list, and prepare to detect dead weak
307 old_weak_ptr_list = weak_ptr_list;
308 weak_ptr_list = NULL;
309 weak_done = rtsFalse;
313 /* ToDo: To fix the caf leak, we need to make the commented out
314 * parts of this code do something sensible - as described in
317 extern void markHugsObjects(void);
319 /* ToDo: This (undefined) function should contain the scavenge
320 * loop immediately below this block of code - but I'm not sure
321 * enough of the details to do this myself.
323 scavengeEverything();
324 /* revert dead CAFs and update enteredCAFs list */
329 /* This will keep the CAFs and the attached BCOs alive
330 * but the values will have been reverted
332 scavengeEverything();
337 /* -------------------------------------------------------------------------
338 * Repeatedly scavenge all the areas we know about until there's no
339 * more scavenging to be done.
346 /* scavenge static objects */
347 if (major_gc && static_objects != END_OF_STATIC_LIST) {
351 /* When scavenging the older generations: Objects may have been
352 * evacuated from generations <= N into older generations, and we
353 * need to scavenge these objects. We're going to try to ensure that
354 * any evacuations that occur move the objects into at least the
355 * same generation as the object being scavenged, otherwise we
356 * have to create new entries on the mutable list for the older
360 /* scavenge each step in generations 0..maxgen */
363 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
364 for (s = 0; s < generations[gen].n_steps; s++) {
365 step = &generations[gen].steps[s];
367 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
371 if (step->new_large_objects != NULL) {
372 scavenge_large(step);
378 if (flag) { goto loop; }
380 /* must be last... */
381 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
386 /* run through all the generations/steps and tidy up
388 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
391 generations[g].collections++; /* for stats */
394 for (s = 0; s < generations[g].n_steps; s++) {
396 step = &generations[g].steps[s];
398 if (!(g == 0 && s == 0)) {
399 /* Tidy the end of the to-space chains */
400 step->hp_bd->free = step->hp;
401 step->hp_bd->link = NULL;
404 /* for generations we collected... */
407 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
409 /* free old memory and shift to-space into from-space for all
410 * the collected steps (except the allocation area). These
411 * freed blocks will probably be quickly recycled.
413 if (!(g == 0 && s == 0)) {
414 freeChain(step->blocks);
415 step->blocks = step->to_space;
416 step->n_blocks = step->to_blocks;
417 step->to_space = NULL;
419 for (bd = step->blocks; bd != NULL; bd = bd->link) {
420 bd->evacuated = 0; /* now from-space */
424 /* LARGE OBJECTS. The current live large objects are chained on
425 * scavenged_large, having been moved during garbage
426 * collection from large_objects. Any objects left on
427 * large_objects list are therefore dead, so we free them here.
429 for (bd = step->large_objects; bd != NULL; bd = next) {
434 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
437 step->large_objects = step->scavenged_large_objects;
439 /* Set the maximum blocks for this generation,
440 * using an arbitrary factor of the no. of blocks in step 0.
443 generation *gen = &generations[g];
445 stg_max(gen->steps[s].n_blocks * 2,
446 RtsFlags.GcFlags.minAllocAreaSize * 4);
447 if (gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
448 gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
449 if (((int)gen->max_blocks - (int)gen->steps[0].n_blocks) <
450 (RtsFlags.GcFlags.pcFreeHeap *
451 RtsFlags.GcFlags.maxHeapSize / 200)) {
457 /* for older generations... */
460 /* For older generations, we need to append the
461 * scavenged_large_object list (i.e. large objects that have been
462 * promoted during this GC) to the large_object list for that step.
464 for (bd = step->scavenged_large_objects; bd; bd = next) {
467 dbl_link_onto(bd, &step->large_objects);
470 /* add the new blocks we promoted during this GC */
471 step->n_blocks += step->to_blocks;
476 /* revert dead CAFs and update enteredCAFs list */
479 /* mark the garbage collected CAFs as dead */
481 if (major_gc) { gcCAFs(); }
484 /* zero the scavenged static object list */
486 zeroStaticObjectList(scavenged_static_objects);
491 for (bd = g0s0->blocks; bd; bd = bd->link) {
492 bd->free = bd->start;
493 ASSERT(bd->gen == g0);
494 ASSERT(bd->step == g0s0);
496 current_nursery = g0s0->blocks;
499 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
500 for (s = 0; s < generations[g].n_steps; s++) {
501 /* approximate amount of live data (doesn't take into account slop
502 * at end of each block). ToDo: this more accurately.
504 if (g == 0 && s == 0) { continue; }
505 step = &generations[g].steps[s];
506 live += step->n_blocks * BLOCK_SIZE_W +
507 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
511 /* Free the small objects allocated via allocate(), since this will
512 * all have been copied into G0S1 now.
514 if (small_alloc_list != NULL) {
515 freeChain(small_alloc_list);
517 small_alloc_list = NULL;
519 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
521 /* start any pending finalisers */
522 scheduleFinalisers(old_weak_ptr_list);
524 /* check sanity after GC */
526 for (g = 0; g <= N; g++) {
527 for (s = 0; s < generations[g].n_steps; s++) {
528 if (g == 0 && s == 0) { continue; }
529 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
530 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
533 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
534 for (s = 0; s < generations[g].n_steps; s++) {
535 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].old_scan_bd,
536 generations[g].steps[s].old_scan));
537 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
540 IF_DEBUG(sanity, checkFreeListSanity());
543 IF_DEBUG(gc, stat_describe_gens());
546 /* symbol-table based profiling */
547 /* heapCensus(to_space); */ /* ToDo */
550 /* restore enclosing cost centre */
555 /* check for memory leaks if sanity checking is on */
556 IF_DEBUG(sanity, memInventory());
558 /* ok, GC over: tell the stats department what happened. */
559 stat_endGC(allocated, collected, live, N);
562 /* -----------------------------------------------------------------------------
565 traverse_weak_ptr_list is called possibly many times during garbage
566 collection. It returns a flag indicating whether it did any work
567 (i.e. called evacuate on any live pointers).
569 Invariant: traverse_weak_ptr_list is called when the heap is in an
570 idempotent state. That means that there are no pending
571 evacuate/scavenge operations. This invariant helps the weak
572 pointer code decide which weak pointers are dead - if there are no
573 new live weak pointers, then all the currently unreachable ones are
576 For generational GC: we just don't try to finalise weak pointers in
577 older generations than the one we're collecting. This could
578 probably be optimised by keeping per-generation lists of weak
579 pointers, but for a few weak pointers this scheme will work.
580 -------------------------------------------------------------------------- */
/* traverse_weak_ptr_list: one pass over the pending weak pointers.
 * Returns rtsTrue if it evacuated anything (i.e. made progress), so
 * the caller knows another scavenge fixpoint round is needed.
 * Weak pointers whose key is still alive are moved back onto
 * weak_ptr_list; the remainder stay on old_weak_ptr_list, which
 * later doubles as the pending-finaliser list.
 */
583 traverse_weak_ptr_list(void)
585 StgWeak *w, **last_w, *next_w;
587 const StgInfoTable *info;
588 rtsBool flag = rtsFalse;
590 if (weak_done) { return rtsFalse; }
592 /* doesn't matter where we evacuate values/finalisers to, since
593 * these pointers are treated as roots (iff the keys are alive).
597 last_w = &old_weak_ptr_list;
598 for (w = old_weak_ptr_list; w; w = next_w) {
601 /* ignore weak pointers in older generations */
602 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
603 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
604 /* remove this weak ptr from the old_weak_ptr list */
606 /* and put it on the new weak ptr list */
608 w->link = weak_ptr_list;
614 info = get_itbl(target);
615 switch (info->type) {
620 case IND_OLDGEN: /* rely on compatible layout with StgInd */
621 case IND_OLDGEN_PERM:
622 /* follow indirections */
623 target = ((StgInd *)target)->indirectee;
627 /* If key is alive, evacuate value and finaliser and
628 * place weak ptr on new weak ptr list.
630 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
631 w->key = ((StgEvacuated *)target)->evacuee;
632 w->value = evacuate(w->value);
633 w->finaliser = evacuate(w->finaliser);
635 /* remove this weak ptr from the old_weak_ptr list */
638 /* and put it on the new weak ptr list */
640 w->link = weak_ptr_list;
645 default: /* key is dead */
652 /* If we didn't make any changes, then we can go round and kill all
653 * the dead weak pointers. The old_weak_ptr list is used as a list
654 * of pending finalisers later on.
656 if (flag == rtsFalse) {
657 for (w = old_weak_ptr_list; w; w = w->link) {
658 w->value = evacuate(w->value);
659 w->finaliser = evacuate(w->finaliser);
/* MarkRoot: evacuate a single root closure and hand back its new
 * address, for use by the get_roots callback in GarbageCollect().
 */
668 MarkRoot(StgClosure *root)
670 root = evacuate(root);
/* addBlock: chain a fresh block onto the given step's allocation
 * area and reset hp/hpLim to point into it.  Called when copy()
 * runs out of room in the current to-space block.
 */
674 static inline void addBlock(step *step)
676 bdescr *bd = allocBlock();
680 if (step->gen->no <= N) {
686 step->hp_bd->free = step->hp;
687 step->hp_bd->link = bd;
688 step->hp = bd->start;
689 step->hpLim = step->hp + BLOCK_SIZE_W;
/* copy: copy an object of `size' words from `src' into to-space of
 * the step associated with block descriptor `bd', promoting to
 * generation evac_gen's step 0 if the natural destination would be
 * too young.  Returns the address of the copy.
 */
694 static __inline__ StgClosure *
695 copy(StgClosure *src, W_ size, bdescr *bd)
700 /* Find out where we're going, using the handy "to" pointer in
701 * the step of the source object. If it turns out we need to
702 * evacuate to an older generation, adjust it here (see comment
706 if (step->gen->no < evac_gen) {
707 step = &generations[evac_gen].steps[0];
710 /* chain a new block onto the to-space for the destination step if
713 if (step->hp + size >= step->hpLim) {
719 for(to = dest, from = (P_)src; size>0; --size) {
722 return (StgClosure *)dest;
/* upd_evacuee: overwrite the from-space copy of an object with an
 * EVACUATED forwarding closure pointing at its new location, so that
 * later references find the moved object.
 */
725 static __inline__ void
726 upd_evacuee(StgClosure *p, StgClosure *dest)
728 StgEvacuated *q = (StgEvacuated *)p;
730 SET_INFO(q,&EVACUATED_info);
734 /* -----------------------------------------------------------------------------
735 Evacuate a mutable object
737 If we evacuate a mutable object to an old generation, cons the
738 object onto the older generation's mutable list.
739 -------------------------------------------------------------------------- */
/* evacuate_mutable: after evacuating a mutable closure, cons it onto
 * the destination generation's mutable list (only needed for
 * generations > 0; gen 0 is always fully collected).
 */
742 evacuate_mutable(StgMutClosure *c)
747 if (bd->gen->no > 0) {
748 c->mut_link = bd->gen->mut_list;
749 bd->gen->mut_list = c;
753 /* -----------------------------------------------------------------------------
754 Evacuate a large object
756 This just consists of removing the object from the (doubly-linked)
757 large_alloc_list, and linking it on to the (singly-linked)
758 new_large_objects list, from where it will be scavenged later.
760 Convention: bd->evacuated is /= 0 for a large object that has been
761 evacuated, or 0 otherwise.
762 -------------------------------------------------------------------------- */
/* evacuate_large: "evacuate" a large object without copying it —
 * unlink its block from the source step's large_objects list and
 * push it onto the destination step's new_large_objects list.
 * `mutable' indicates the object must also go on the mutable list.
 */
765 evacuate_large(StgPtr p, rtsBool mutable)
767 bdescr *bd = Bdescr(p);
770 /* should point to the beginning of the block */
771 ASSERT(((W_)p & BLOCK_MASK) == 0);
773 /* already evacuated? */
775 /* Don't forget to set the failed_to_evac flag if we didn't get
776 * the desired destination (see comments in evacuate()).
778 if (bd->gen->no < evac_gen) {
779 failed_to_evac = rtsTrue;
785 /* remove from large_object list */
787 bd->back->link = bd->link;
788 } else { /* first object in the list */
789 step->large_objects = bd->link;
792 bd->link->back = bd->back;
795 /* link it on to the evacuated large object list of the destination step
798 if (step->gen->no < evac_gen) {
799 step = &generations[evac_gen].steps[0];
804 bd->link = step->new_large_objects;
805 step->new_large_objects = bd;
809 evacuate_mutable((StgMutClosure *)p);
813 /* -----------------------------------------------------------------------------
814 Adding a MUT_CONS to an older generation.
816 This is necessary from time to time when we end up with an
817 old-to-new generation pointer in a non-mutable object. We defer
818 the promotion until the next GC.
819 -------------------------------------------------------------------------- */
/* mkMutCons: allocate a MUT_CONS in generation `gen' wrapping `ptr'
 * and put it on that generation's mutable list.  Used when a
 * non-mutable old-gen object ends up pointing at a younger object;
 * the promotion is deferred until the next GC.
 */
822 mkMutCons(StgClosure *ptr, generation *gen)
827 step = &gen->steps[0];
829 /* chain a new block onto the to-space for the destination step if
832 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
836 q = (StgMutVar *)step->hp;
837 step->hp += sizeofW(StgMutVar);
839 SET_HDR(q,&MUT_CONS_info,CCS_GC);
841 evacuate_mutable((StgMutClosure *)q);
843 return (StgClosure *)q;
846 /* -----------------------------------------------------------------------------
849 This is called (eventually) for every live object in the system.
851 The caller to evacuate specifies a desired generation in the
852 evac_gen global variable. The following conditions apply to
853 evacuating an object which resides in generation M when we're
854 collecting up to generation N
858 else evac to step->to
860 if M < evac_gen evac to evac_gen, step 0
862 if the object is already evacuated, then we check which generation
865 if M >= evac_gen do nothing
866 if M < evac_gen set failed_to_evac flag to indicate that we
867 didn't manage to evacuate this object into evac_gen.
869 -------------------------------------------------------------------------- */
/* evacuate: copy the closure `q' into to-space (or handle it
 * specially for large objects, static objects, indirections and
 * already-evacuated objects) and return its new address.  The
 * desired destination generation is the global evac_gen; if we
 * cannot honour it, failed_to_evac is set (see header comment).
 * Dispatches on the closure's info-table type.
 */
873 evacuate(StgClosure *q)
877 const StgInfoTable *info;
880 if (!LOOKS_LIKE_STATIC(q)) {
882 if (bd->gen->no > N) {
883 /* Can't evacuate this object, because it's in a generation
884 * older than the ones we're collecting. Let's hope that it's
885 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
887 if (bd->gen->no < evac_gen) {
889 failed_to_evac = rtsTrue;
895 /* make sure the info pointer is into text space */
896 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
897 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
900 switch (info -> type) {
903 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
909 to = copy(q,sizeW_fromITBL(info),bd);
911 evacuate_mutable((StgMutClosure *)to);
918 case IND_OLDGEN_PERM:
923 to = copy(q,sizeW_fromITBL(info),bd);
930 /* ToDo: don't need to copy all the blackhole, some of it is
933 to = copy(q,BLACKHOLE_sizeW(),bd);
939 const StgInfoTable* selectee_info;
940 StgClosure* selectee = ((StgSelector*)q)->selectee;
943 selectee_info = get_itbl(selectee);
944 switch (selectee_info->type) {
948 StgNat32 offset = info->layout.selector_offset;
950 /* check that the size is in range */
952 (StgNat32)(selectee_info->layout.payload.ptrs +
953 selectee_info->layout.payload.nptrs));
955 /* perform the selection! */
956 q = selectee->payload[offset];
958 /* if we're already in to-space, there's no need to continue
959 * with the evacuation, just update the source address with
960 * a pointer to the (evacuated) constructor field.
962 if (IS_USER_PTR(q)) {
963 bdescr *bd = Bdescr((P_)q);
965 if (bd->gen->no < evac_gen) {
966 failed_to_evac = rtsTrue;
972 /* otherwise, carry on and evacuate this constructor field,
973 * (but not the constructor itself)
982 case IND_OLDGEN_PERM:
983 selectee = stgCast(StgInd *,selectee)->indirectee;
987 selectee = stgCast(StgCAF *,selectee)->value;
991 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
997 /* aargh - do recursively???? */
1002 /* not evaluated yet */
1006 barf("evacuate: THUNK_SELECTOR: strange selectee");
1009 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1015 /* follow chains of indirections, don't evacuate them */
1016 q = ((StgInd*)q)->indirectee;
1019 /* ToDo: optimise STATIC_LINK for known cases.
1020 - FUN_STATIC : payload[0]
1021 - THUNK_STATIC : payload[1]
1022 - IND_STATIC : payload[1]
1026 if (info->srt_len == 0) { /* small optimisation */
1032 /* don't want to evacuate these, but we do want to follow pointers
1033 * from SRTs - see scavenge_static.
1036 /* put the object on the static list, if necessary.
1038 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1039 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1040 static_objects = (StgClosure *)q;
1044 case CONSTR_INTLIKE:
1045 case CONSTR_CHARLIKE:
1046 case CONSTR_NOCAF_STATIC:
1047 /* no need to put these on the static linked list, they don't need
1062 /* shouldn't see these */
1063 barf("evacuate: stack frame\n");
1067 /* these are special - the payload is a copy of a chunk of stack,
1069 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1074 /* Already evacuated, just return the forwarding address.
1075 * HOWEVER: if the requested destination generation (evac_gen) is
1076 * older than the actual generation (because the object was
1077 * already evacuated to a younger generation) then we have to
1078 * set the failed_to_evac flag to indicate that we couldn't
1079 * manage to promote the object to the desired generation.
1081 if (evac_gen > 0) { /* optimisation */
1082 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1083 if (Bdescr((P_)p)->gen->no < evac_gen) {
1084 /* fprintf(stderr,"evac failed!\n");*/
1085 failed_to_evac = rtsTrue;
1088 return ((StgEvacuated*)q)->evacuee;
1093 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1095 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1096 evacuate_large((P_)q, rtsFalse);
1099 /* just copy the block */
1100 to = copy(q,size,bd);
1107 case MUT_ARR_PTRS_FROZEN:
1109 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1111 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1112 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1115 /* just copy the block */
1116 to = copy(q,size,bd);
1118 if (info->type == MUT_ARR_PTRS) {
1119 evacuate_mutable((StgMutClosure *)to);
1127 StgTSO *tso = stgCast(StgTSO *,q);
1128 nat size = tso_sizeW(tso);
1131 /* Large TSOs don't get moved, so no relocation is required.
1133 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1134 evacuate_large((P_)q, rtsFalse);
1135 tso->mut_link = NULL; /* see below */
1138 /* To evacuate a small TSO, we need to relocate the update frame
1142 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1144 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1146 /* relocate the stack pointers... */
1147 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1148 new_tso->sp = (StgPtr)new_tso->sp + diff;
1149 new_tso->splim = (StgPtr)new_tso->splim + diff;
1151 relocate_TSO(tso, new_tso);
1152 upd_evacuee(q,(StgClosure *)new_tso);
1154 /* don't evac_mutable - these things are marked mutable as
1155 * required. We *do* need to zero the mut_link field, though:
1156 * this TSO might have been on the mutable list for this
1157 * generation, but we're collecting this generation anyway so
1158 * we didn't follow the mutable list.
1160 new_tso->mut_link = NULL;
1162 return (StgClosure *)new_tso;
1168 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1172 barf("evacuate: strange closure type");
1178 /* -----------------------------------------------------------------------------
1179 relocate_TSO is called just after a TSO has been copied from src to
1180 dest. It adjusts the update frame list for the new location.
1181 -------------------------------------------------------------------------- */
/* relocate_TSO: walk the chain of update/catch/seq frames in the
 * freshly-copied TSO `dest' and add the src->dest displacement to
 * each frame's link pointer, so the chain points into the new stack.
 */
1184 relocate_TSO(StgTSO *src, StgTSO *dest)
1191 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1195 while ((P_)su < dest->stack + dest->stack_size) {
1196 switch (get_itbl(su)->type) {
1198 /* GCC actually manages to common up these three cases! */
1201 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1206 cf = (StgCatchFrame *)su;
1207 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1212 sf = (StgSeqFrame *)su;
1213 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1222 barf("relocate_TSO");
/* scavenge_srt: evacuate every closure referenced from an info
 * table's static reference table (SRT).
 */
1231 scavenge_srt(const StgInfoTable *info)
1233 StgClosure **srt, **srt_end;
1235 /* evacuate the SRT. If srt_len is zero, then there isn't an
1236 * srt field in the info table. That's ok, because we'll
1237 * never dereference it.
1239 srt = stgCast(StgClosure **,info->srt);
1240 srt_end = srt + info->srt_len;
1241 for (; srt < srt_end; srt++) {
1246 /* -----------------------------------------------------------------------------
1247 Scavenge a given step until there are no more objects in this step
1250 evac_gen is set by the caller to be either zero (for a step in a
1251 generation < N) or G where G is the generation of the step being
1254 We sometimes temporarily change evac_gen back to zero if we're
1255 scavenging a mutable object where early promotion isn't such a good
1257 -------------------------------------------------------------------------- */
/* scavenge: breadth-first scavenge of a step's to-space, from the
 * scan pointer up to the allocation pointer, evacuating everything
 * each object points to.  Objects that fail to promote all their
 * children (failed_to_evac) are recorded on the mutable list via
 * mkMutCons.  evac_gen is temporarily dropped to 0 for objects
 * that are permanently mutable.
 */
1261 scavenge(step *step)
1264 const StgInfoTable *info;
1266 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1271 failed_to_evac = rtsFalse;
1273 /* scavenge phase - standard breadth-first scavenging of the
1277 while (bd != step->hp_bd || p < step->hp) {
1279 /* If we're at the end of this block, move on to the next block */
1280 if (bd != step->hp_bd && p == bd->free) {
1286 q = p; /* save ptr to object */
1288 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1289 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1291 info = get_itbl((StgClosure *)p);
1292 switch (info -> type) {
1296 StgBCO* bco = stgCast(StgBCO*,p);
1298 for (i = 0; i < bco->n_ptrs; i++) {
1299 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1301 p += bco_sizeW(bco);
1306 /* treat MVars specially, because we don't want to evacuate the
1307 * mut_link field in the middle of the closure.
1310 StgMVar *mvar = ((StgMVar *)p);
1312 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1313 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1314 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1315 p += sizeofW(StgMVar);
1316 evac_gen = saved_evac_gen;
1329 case IND_OLDGEN_PERM:
1335 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1336 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1337 (StgClosure *)*p = evacuate((StgClosure *)*p);
1339 p += info->layout.payload.nptrs;
1344 /* ignore MUT_CONSs */
1345 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1347 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1348 evac_gen = saved_evac_gen;
1350 p += sizeofW(StgMutVar);
1355 p += BLACKHOLE_sizeW();
1360 StgBlackHole *bh = (StgBlackHole *)p;
1361 (StgClosure *)bh->blocking_queue =
1362 evacuate((StgClosure *)bh->blocking_queue);
1363 p += BLACKHOLE_sizeW();
1367 case THUNK_SELECTOR:
1369 StgSelector *s = (StgSelector *)p;
1370 s->selectee = evacuate(s->selectee);
1371 p += THUNK_SELECTOR_sizeW();
1377 barf("scavenge:IND???\n");
1379 case CONSTR_INTLIKE:
1380 case CONSTR_CHARLIKE:
1382 case CONSTR_NOCAF_STATIC:
1386 /* Shouldn't see a static object here. */
1387 barf("scavenge: STATIC object\n");
1399 /* Shouldn't see stack frames here. */
1400 barf("scavenge: stack frame\n");
1402 case AP_UPD: /* same as PAPs */
1404 /* Treat a PAP just like a section of stack, not forgetting to
1405 * evacuate the function pointer too...
1408 StgPAP* pap = stgCast(StgPAP*,p);
1410 pap->fun = evacuate(pap->fun);
1411 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1412 p += pap_sizeW(pap);
1418 /* nothing to follow */
1419 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1423 /* follow everything */
1427 evac_gen = 0; /* repeatedly mutable */
1428 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1429 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1430 (StgClosure *)*p = evacuate((StgClosure *)*p);
1432 evac_gen = saved_evac_gen;
1436 case MUT_ARR_PTRS_FROZEN:
1437 /* follow everything */
1439 StgPtr start = p, next;
1441 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1442 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1443 (StgClosure *)*p = evacuate((StgClosure *)*p);
1445 if (failed_to_evac) {
1446 /* we can do this easier... */
1447 evacuate_mutable((StgMutClosure *)start);
1448 failed_to_evac = rtsFalse;
1459 /* chase the link field for any TSOs on the same queue */
1460 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1461 /* scavenge this thread's stack */
1462 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1463 evac_gen = saved_evac_gen;
1464 p += tso_sizeW(tso);
1471 barf("scavenge: unimplemented/strange closure type\n");
1477 /* If we didn't manage to promote all the objects pointed to by
1478 * the current object, then we have to designate this object as
1479 * mutable (because it contains old-to-new generation pointers).
1481 if (failed_to_evac) {
1482 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1483 failed_to_evac = rtsFalse;
1491 /* -----------------------------------------------------------------------------
1492 Scavenge one object.
1494 This is used for objects that are temporarily marked as mutable
1495 because they contain old-to-new generation pointers. Only certain
1496 objects can have this property.
1497 -------------------------------------------------------------------------- */
/* Scavenge a single closure: evacuate every pointer it contains.
 * Used for closures temporarily on a mutable list because they hold
 * old-to-new generation pointers.  Returns (via no_luck, taken from
 * failed_to_evac) whether some pointee could NOT be promoted to the
 * desired generation, in which case the caller must keep the object
 * on the mutable list.
 * NOTE(review): several case arms are shared/fall-through here; the
 * visible labels cover only part of the switch.
 */
1499 scavenge_one(StgPtr p)
1504 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1505 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1507 info = get_itbl((StgClosure *)p);
1509 switch (info -> type) {
1517 case IND_OLDGEN_PERM:
/* generic case: evacuate each pointer word in the payload */
1523 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1524 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1525 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* black holes with a blocking queue: follow the queue of blocked TSOs */
1536 StgBlackHole *bh = (StgBlackHole *)p;
1537 (StgClosure *)bh->blocking_queue =
1538 evacuate((StgClosure *)bh->blocking_queue);
1542 case THUNK_SELECTOR:
1544 StgSelector *s = (StgSelector *)p;
1545 s->selectee = evacuate(s->selectee);
1549 case AP_UPD: /* same as PAPs */
1551 /* Treat a PAP just like a section of stack, not forgetting to
1552 * evacuate the function pointer too...
1555 StgPAP* pap = stgCast(StgPAP*,p);
1557 pap->fun = evacuate(pap->fun);
1558 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1563 /* This might happen if for instance a MUT_CONS was pointing to a
1564 * THUNK which has since been updated. The IND_OLDGEN will
1565 * be on the mutable list anyway, so we don't need to do anything
1571 barf("scavenge_one: strange object");
/* report whether evacuation fully succeeded, and reset the flag
 * for the next caller */
1574 no_luck = failed_to_evac;
1575 failed_to_evac = rtsFalse;
1580 /* -----------------------------------------------------------------------------
1581 Scavenging mutable lists.
1583 We treat the mutable list of each generation > N (i.e. all the
1584 generations older than the one being collected) as roots. We also
1585 remove non-mutable objects from the mutable list at this point.
1586 -------------------------------------------------------------------------- */
/* Scavenge the mutable list of a generation being treated as a root
 * set.  Walks the list via 'prev' (a pointer to the previous link
 * field) so entries can be unlinked in place; objects proven to no
 * longer contain old-to-new pointers are removed from the list.
 * 'gen' is the generation whose list this is — objects must be
 * promoted at least this far to be delisted.
 */
1588 static StgMutClosure *
1589 scavenge_mutable_list(StgMutClosure *p, nat gen)
1592 StgMutClosure *start;
1593 StgMutClosure **prev;
1600 failed_to_evac = rtsFalse;
1602 for (; p != END_MUT_LIST; p = *prev) {
1604 /* make sure the info pointer is into text space */
1605 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1606 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1609 switch(info->type) {
1611 case MUT_ARR_PTRS_FROZEN:
1612 /* remove this guy from the mutable list, but follow the ptrs
1613 * anyway (and make sure they get promoted to this gen).
1618 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1620 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1621 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* couldn't promote everything: keep it on the list */
1625 if (failed_to_evac) {
1626 failed_to_evac = rtsFalse;
1627 prev = &p->mut_link;
/* fully promoted: unlink it */
1629 *prev = p->mut_link;
1635 /* follow everything */
1636 prev = &p->mut_link;
1640 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1641 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1642 (StgClosure *)*q = evacuate((StgClosure *)*q);
1648 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1649 * it from the mutable list if possible by promoting whatever it
1652 if (p->header.info == &MUT_CONS_info) {
1654 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1655 /* didn't manage to promote everything, so leave the
1656 * MUT_CONS on the list.
1658 prev = &p->mut_link;
1660 *prev = p->mut_link;
/* a real MUT_VAR is permanently mutable: always stays listed */
1664 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1665 prev = &p->mut_link;
1670 /* follow ptrs and remove this from the mutable list */
1672 StgTSO *tso = (StgTSO *)p;
1674 /* Don't bother scavenging if this thread is dead
1676 if (!(tso->whatNext == ThreadComplete ||
1677 tso->whatNext == ThreadKilled)) {
1678 /* Don't need to chase the link field for any TSOs on the
1679 * same queue. Just scavenge this thread's stack
1681 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1684 /* Don't take this TSO off the mutable list - it might still
1685 * point to some younger objects (because we set evac_gen to 0
1688 prev = &tso->mut_link;
1693 case IND_OLDGEN_PERM:
1695 /* Try to pull the indirectee into this generation, so we can
1696 * remove the indirection from the mutable list.
1699 ((StgIndOldGen *)p)->indirectee =
1700 evacuate(((StgIndOldGen *)p)->indirectee);
1703 if (failed_to_evac) {
1704 failed_to_evac = rtsFalse;
1705 prev = &p->mut_link;
1707 *prev = p->mut_link;
1708 /* the mut_link field of an IND_STATIC is overloaded as the
1709 * static link field too (it just so happens that we don't need
1710 * both at the same time), so we need to NULL it out when
1711 * removing this object from the mutable list because the static
1712 * link fields are all assumed to be NULL before doing a major
1720 /* shouldn't have anything else on the mutables list */
1721 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the static-object list built up during evacuation.
 * Each object is moved from static_objects onto
 * scavenged_static_objects (so the list can be traversed while new
 * objects are still being consed on the front), then its pointers are
 * evacuated straight into the oldest generation.
 */
1728 scavenge_static(void)
1730 StgClosure* p = static_objects;
1731 const StgInfoTable *info;
1733 /* Always evacuate straight to the oldest generation for static
1735 evac_gen = oldest_gen->no;
1737 /* keep going until we've scavenged all the objects on the linked
1739 while (p != END_OF_STATIC_LIST) {
1743 /* make sure the info pointer is into text space */
1744 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1745 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1747 /* Take this object *off* the static_objects list,
1748 * and put it on the scavenged_static_objects list.
1750 static_objects = STATIC_LINK(info,p);
1751 STATIC_LINK(info,p) = scavenged_static_objects;
1752 scavenged_static_objects = p;
1754 switch (info -> type) {
1758 StgInd *ind = (StgInd *)p;
1759 ind->indirectee = evacuate(ind->indirectee);
1761 /* might fail to evacuate it, in which case we have to pop it
1762 * back on the mutable list (and take it off the
1763 * scavenged_static list because the static link and mut link
1764 * pointers are one and the same).
1766 if (failed_to_evac) {
1767 failed_to_evac = rtsFalse;
1768 scavenged_static_objects = STATIC_LINK(info,p);
1769 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1770 oldest_gen->mut_list = (StgMutClosure *)ind;
/* generic static constructor/function: evacuate payload pointers */
1784 next = (P_)p->payload + info->layout.payload.ptrs;
1785 /* evacuate the pointers */
1786 for (q = (P_)p->payload; q < next; q++) {
1787 (StgClosure *)*q = evacuate((StgClosure *)*q);
1793 barf("scavenge_static");
/* only the IND_STATIC case above may set failed_to_evac */
1796 ASSERT(failed_to_evac == rtsFalse);
1798 /* get the next static object from the list. Remember, there might
1799 * be more stuff on this list now that we've done some evacuating!
1800 * (static_objects is a global)
1806 /* -----------------------------------------------------------------------------
1807 scavenge_stack walks over a section of stack and evacuates all the
1808 objects pointed to by it. We can use the same code for walking
1809 PAPs, since these are just sections of copied stack.
1810 -------------------------------------------------------------------------- */
/* Walk a section of stack [p, stack_end) and evacuate every closure
 * pointer found in it.  Also used for PAP payloads, which are just
 * copied sections of stack.  The stack is a sequence of pending
 * argument sections and activation records; activation records are
 * decoded via their info-table bitmaps.
 */
1813 scavenge_stack(StgPtr p, StgPtr stack_end)
1816 const StgInfoTable* info;
1820 * Each time around this loop, we are looking at a chunk of stack
1821 * that starts with either a pending argument section or an
1822 * activation record.
1825 while (p < stack_end) {
1826 q = *stgCast(StgPtr*,p);
1828 /* If we've got a tag, skip over that many words on the stack */
1829 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1834 /* Is q a pointer to a closure?
1836 if (! LOOKS_LIKE_GHC_INFO(q)) {
1839 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1840 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1842 /* otherwise, must be a pointer into the allocation space.
1846 (StgClosure *)*p = evacuate((StgClosure *)q);
1852 * Otherwise, q must be the info pointer of an activation
1853 * record. All activation records have 'bitmap' style layout
1856 info = get_itbl(stgCast(StgClosure*,p));
1858 switch (info->type) {
1860 /* Dynamic bitmap: the mask is stored on the stack */
1862 bitmap = stgCast(StgRetDyn*,p)->liveness;
1863 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1866 /* probably a slow-entry point return address: */
1872 /* Specialised code for update frames, since they're so common.
1873 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
1874 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
1878 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1880 StgClosureType type = get_itbl(frame->updatee)->type;
1882 p += sizeofW(StgUpdateFrame);
1883 if (type == EVACUATED) {
/* already moved: just follow the forwarding pointer */
1884 frame->updatee = evacuate(frame->updatee);
1887 bdescr *bd = Bdescr((P_)frame->updatee);
1888 ASSERT(type == BLACKHOLE ||
1889 type == CAF_BLACKHOLE ||
1890 type == BLACKHOLE_BQ);
/* updatee already lives in an older generation than the one
 * being collected: leave it, but note if it's younger than the
 * generation we were asked to evacuate into */
1891 if (bd->gen->no > N) {
1892 if (bd->gen->no < evac_gen) {
1893 failed_to_evac = rtsTrue;
/* inline copy of the black hole (the common case) */
1897 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
1898 upd_evacuee(frame->updatee,to);
1899 frame->updatee = to;
1904 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
1911 bitmap = info->layout.bitmap;
/* a clear bit means "live pointer": evacuate that stack word */
1914 while (bitmap != 0) {
1915 if ((bitmap & 1) == 0) {
1916 (StgClosure *)*p = evacuate((StgClosure *)*p);
1919 bitmap = bitmap >> 1;
1926 /* large bitmap (> 32 entries) */
1931 StgLargeBitmap *large_bitmap;
1934 large_bitmap = info->layout.large_bitmap;
1937 for (i=0; i<large_bitmap->size; i++) {
1938 bitmap = large_bitmap->bitmap[i];
1939 q = p + sizeof(W_) * 8;
1940 while (bitmap != 0) {
1941 if ((bitmap & 1) == 0) {
1942 (StgClosure *)*p = evacuate((StgClosure *)*p);
1945 bitmap = bitmap >> 1;
1947 if (i+1 < large_bitmap->size) {
1949 (StgClosure *)*p = evacuate((StgClosure *)*p);
1955 /* and don't forget to follow the SRT */
1960 barf("scavenge_stack: weird activation record found on stack.\n");
1965 /*-----------------------------------------------------------------------------
1966 scavenge the large object list.
1968 evac_gen set by caller; similar games played with evac_gen as with
1969 scavenge() - see comment at the top of scavenge(). Most large
1970 objects are (repeatedly) mutable, so most of the time evac_gen will
1972 --------------------------------------------------------------------------- */
/* Scavenge the large-object list of a step.  new_large_objects is
 * treated as a stack (evacuation pushes onto the front), so each
 * object is popped onto scavenged_large_objects before being
 * scavenged — new arrivals during scavenging are picked up by the
 * outer loop re-reading step->new_large_objects.
 */
1975 scavenge_large(step *step)
1979 const StgInfoTable* info;
1980 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1982 evac_gen = 0; /* most objects are mutable */
1983 bd = step->new_large_objects;
1985 for (; bd != NULL; bd = step->new_large_objects) {
1987 /* take this object *off* the large objects list and put it on
1988 * the scavenged large objects list. This is so that we can
1989 * treat new_large_objects as a stack and push new objects on
1990 * the front when evacuating.
1992 step->new_large_objects = bd->link;
1993 dbl_link_onto(bd, &step->scavenged_large_objects);
1996 info = get_itbl(stgCast(StgClosure*,p));
1998 switch (info->type) {
2000 /* only certain objects can be "large"... */
2004 /* nothing to follow */
2008 /* follow everything */
2012 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2013 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2014 (StgClosure *)*p = evacuate((StgClosure *)*p);
2019 case MUT_ARR_PTRS_FROZEN:
2020 /* follow everything */
2022 StgPtr start = p, next;
2024 evac_gen = saved_evac_gen; /* not really mutable */
2025 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2026 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2027 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* couldn't promote every element: treat the array as mutable */
2030 if (failed_to_evac) {
2031 evacuate_mutable((StgMutClosure *)start);
2038 StgBCO* bco = stgCast(StgBCO*,p);
/* BCOs are immutable: restore the caller's evac_gen */
2040 evac_gen = saved_evac_gen;
2041 for (i = 0; i < bco->n_ptrs; i++) {
2042 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2053 /* chase the link field for any TSOs on the same queue */
2054 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2055 /* scavenge this thread's stack */
2056 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2061 barf("scavenge_large: unknown/strange object");
/* Reset the static link field of every object on a static-object
 * list to NULL.  Run after GC so that "static link == NULL" again
 * means "not on the list" for the next collection (see the notes on
 * the static object list at the top of this file).
 */
2067 zeroStaticObjectList(StgClosure* first_static)
2071 const StgInfoTable *info;
2073 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
/* grab the link before zeroing it */
2075 link = STATIC_LINK(info, p);
2076 STATIC_LINK(info,p) = NULL;
2080 /* This function is only needed because we share the mutable link
2081 * field with the static link field in an IND_STATIC, so we have to
2082 * zero the mut_link field before doing a major GC, which needs the
2083 * static link field.
2085 * It doesn't do any harm to zero all the mutable link fields on the
/* NULL out the mut_link field of every closure on a mutable list.
 * Needed before a major GC because IND_STATIC shares its mut_link
 * field with the static link field (see comment above).
 */
2089 zeroMutableList(StgMutClosure *first)
2091 StgMutClosure *next, *c;
2093 for (c = first; c != END_MUT_LIST; c = next) {
2099 /* -----------------------------------------------------------------------------
2101 -------------------------------------------------------------------------- */
/* Revert every entered CAF back to the unentered state, emptying the
 * enteredCAFs list.  The value and link fields are poisoned with
 * 0xdeadbeef so that any stale use is caught quickly.
 */
2103 void RevertCAFs(void)
2105 while (enteredCAFs != END_CAF_LIST) {
2106 StgCAF* caf = enteredCAFs;
2108 enteredCAFs = caf->link;
2109 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2110 SET_INFO(caf,&CAF_UNENTERED_info);
2111 caf->value = stgCast(StgClosure*,0xdeadbeef);
2112 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* Rebuild the enteredCAFs list after GC: CAFs that were evacuated
 * (i.e. are still live) are kept on the list via their new address;
 * dead ones are reverted to the unentered state and poisoned.
 */
2116 void revertDeadCAFs(void)
2118 StgCAF* caf = enteredCAFs;
/* start a fresh list; live CAFs are consed back on below */
2119 enteredCAFs = END_CAF_LIST;
2120 while (caf != END_CAF_LIST) {
2121 StgCAF* next = caf->link;
2123 switch(GET_INFO(caf)->type) {
2126 /* This object has been evacuated, it must be live. */
2127 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2128 new->link = enteredCAFs;
/* dead CAF: revert it and poison its fields */
2134 SET_INFO(caf,&CAF_UNENTERED_info);
2135 caf->value = stgCast(StgClosure*,0xdeadbeef);
2136 caf->link = stgCast(StgCAF*,0xdeadbeef);
2140 barf("revertDeadCAFs: enteredCAFs list corrupted");
2146 /* -----------------------------------------------------------------------------
2147 Sanity code for CAF garbage collection.
2149 With DEBUG turned on, we manage a CAF list in addition to the SRT
2150 mechanism. After GC, we run down the CAF list and blackhole any
2151 CAFs which have been garbage collected. This means we get an error
2152 whenever the program tries to enter a garbage collected CAF.
2154 Any garbage collected CAFs are taken off the CAF list at the same
2156 -------------------------------------------------------------------------- */
/* DEBUG-only CAF sweep (see the sanity-code comment above): walk the
 * CAF list; any CAF whose static link is NULL after GC was not
 * reached, so black-hole it and unlink it — entering it later gives
 * an immediate error instead of silent corruption.
 * NOTE(review): the function header is above this fragment;
 * presumably this is gcCAFs — confirm against the full file.
 */
2164 const StgInfoTable *info;
2175 ASSERT(info->type == IND_STATIC);
2177 if (STATIC_LINK(info,p) == NULL) {
2178 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
/* dead: black-hole it and skip past via the secondary link */
2180 SET_INFO(p,&BLACKHOLE_info);
2181 p = STATIC_LINK2(info,p);
/* live: keep it, advancing the "previous link" pointer */
2185 pp = &STATIC_LINK2(info,p);
2192 /* fprintf(stderr, "%d CAFs live\n", i); */
2196 /* -----------------------------------------------------------------------------
2199 Whenever a thread returns to the scheduler after possibly doing
2200 some work, we have to run down the stack and black-hole all the
2201 closures referred to by update frames.
2202 -------------------------------------------------------------------------- */
/* Lazy black-holing: walk down a thread's update-frame chain and
 * black-hole every updatee that isn't already a black hole.  Called
 * when a thread returns to the scheduler (see threadPaused below).
 */
2205 threadLazyBlackHole(StgTSO *tso)
2207 StgUpdateFrame *update_frame;
2211 stack_end = &tso->stack[tso->stack_size];
2212 update_frame = tso->su;
2215 switch (get_itbl(update_frame)->type) {
/* catch frames don't have an updatee: just follow the chain */
2218 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2222 bh = stgCast(StgBlackHole*,update_frame->updatee);
2224 /* if the thunk is already blackholed, it means we've also
2225 * already blackholed the rest of the thunks on this stack,
2226 * so we can stop early.
2229 /* Don't for now: when we enter a CAF, we create a black hole on
2230 * the heap and make the update frame point to it. Thus the
2231 * above optimisation doesn't apply.
2233 if (bh->header.info != &BLACKHOLE_info
2234 && bh->header.info != &BLACKHOLE_BQ_info
2235 && bh->header.info != &CAF_BLACKHOLE_info) {
2236 SET_INFO(bh,&BLACKHOLE_info);
2239 update_frame = update_frame->link;
/* seq frames: likewise just follow the chain */
2243 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2249 barf("threadPaused");
2254 /* -----------------------------------------------------------------------------
2257 * Code largely pinched from old RTS, then hacked to bits. We also do
2258 * lazy black holing here.
2260 * -------------------------------------------------------------------------- */
/* Stack squeezing: remove adjacent "empty" update frames from a
 * thread's stack (two consecutive update frames with nothing between
 * them update the same value, so one can be bypassed with an
 * indirection), sliding the live stack words up by the accumulated
 * displacement.  Also performs lazy black-holing on the way.
 * Code largely pinched from the old RTS.
 */
2263 threadSqueezeStack(StgTSO *tso)
2265 lnat displacement = 0;
2266 StgUpdateFrame *frame;
2267 StgUpdateFrame *next_frame; /* Temporally next */
2268 StgUpdateFrame *prev_frame; /* Temporally previous */
2270 rtsBool prev_was_update_frame;
2272 bottom = &(tso->stack[tso->stack_size]);
2275 /* There must be at least one frame, namely the STOP_FRAME.
2277 ASSERT((P_)frame < bottom);
2279 /* Walk down the stack, reversing the links between frames so that
2280 * we can walk back up as we squeeze from the bottom. Note that
2281 * next_frame and prev_frame refer to next and previous as they were
2282 * added to the stack, rather than the way we see them in this
2283 * walk. (It makes the next loop less confusing.)
2285 * Could stop if we find an update frame pointing to a black hole,
2286 * but see comment in threadLazyBlackHole().
2290 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2291 prev_frame = frame->link;
2292 frame->link = next_frame;
2297 /* Now, we're at the bottom. Frame points to the lowest update
2298 * frame on the stack, and its link actually points to the frame
2299 * above. We have to walk back up the stack, squeezing out empty
2300 * update frames and turning the pointers back around on the way
2303 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2304 * we never want to eliminate it anyway. Just walk one step up
2305 * before starting to squeeze. When you get to the topmost frame,
2306 * remember that there are still some words above it that might have
2313 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2316 * Loop through all of the frames (everything except the very
2317 * bottom). Things are complicated by the fact that we have
2318 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2319 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2321 while (frame != NULL) {
2323 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2324 rtsBool is_update_frame;
2326 next_frame = frame->link;
2327 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
/* squeeze condition:
2330 * 1. both the previous and current frame are update frames
2331 * 2. the current frame is empty
2333 if (prev_was_update_frame && is_update_frame &&
2334 (P_)prev_frame == frame_bottom + displacement) {
2336 /* Now squeeze out the current frame */
2337 StgClosure *updatee_keep = prev_frame->updatee;
2338 StgClosure *updatee_bypass = frame->updatee;
2341 fprintf(stderr, "squeezing frame at %p\n", frame);
2344 /* Deal with blocking queues. If both updatees have blocked
2345 * threads, then we should merge the queues into the update
2346 * frame that we're keeping.
2348 * Alternatively, we could just wake them up: they'll just go
2349 * straight to sleep on the proper blackhole! This is less code
2350 * and probably less bug prone, although it's probably much
2353 #if 0 /* do it properly... */
2354 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info) {
2355 /* Sigh. It has one. Don't lose those threads! */
2356 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
2357 /* Urgh. Two queues. Merge them. */
2358 P_ keep_tso = ((StgBlackHole *)updatee_keep)->blocking_queue;
2360 while (keep_tso->link != END_TSO_QUEUE) {
2361 keep_tso = keep_tso->link;
2363 keep_tso->link = ((StgBlackHole *)updatee_bypass)->blocking_queue;
2366 /* For simplicity, just swap the BQ for the BH */
2367 P_ temp = updatee_keep;
2369 updatee_keep = updatee_bypass;
2370 updatee_bypass = temp;
2372 /* Record the swap in the kept frame (below) */
2373 prev_frame->updatee = updatee_keep;
/* bypassed updatee becomes an indirection to the kept one */
2378 TICK_UPD_SQUEEZED();
2379 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2381 sp = (P_)frame - 1; /* sp = stuff to slide */
2382 displacement += sizeofW(StgUpdateFrame);
2385 /* No squeeze for this frame */
2386 sp = frame_bottom - 1; /* Keep the current frame */
2388 /* Do lazy black-holing.
2390 if (is_update_frame) {
2391 StgBlackHole *bh = (StgBlackHole *)frame->updatee;
2392 if (bh->header.info != &BLACKHOLE_info
2393 && bh->header.info != &BLACKHOLE_BQ_info
2394 && bh->header.info != &CAF_BLACKHOLE_info
2396 SET_INFO(bh,&BLACKHOLE_info);
2400 /* Fix the link in the current frame (should point to the frame below) */
2401 frame->link = prev_frame;
2402 prev_was_update_frame = is_update_frame;
2405 /* Now slide all words from sp up to the next frame */
2407 if (displacement > 0) {
2408 P_ next_frame_bottom;
2410 if (next_frame != NULL)
2411 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2413 next_frame_bottom = tso->sp - 1;
2416 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* slide downwards (from high addresses) so we never overwrite
 * words we still have to move */
2420 while (sp >= next_frame_bottom) {
2421 sp[displacement] = *sp;
2425 (P_)prev_frame = (P_)frame + displacement;
/* finally adjust the thread's stack pointer and frame chain */
2429 tso->sp += displacement;
2430 tso->su = prev_frame;
2433 /* -----------------------------------------------------------------------------
2436 * We have to prepare for GC - this means doing lazy black holing
2437 * here. We also take the opportunity to do stack squeezing if it's
2439 * -------------------------------------------------------------------------- */
/* Called when a thread returns to the scheduler: prepare its stack
 * for possible GC.  Either squeeze the stack (which black-holes as a
 * side effect) or just do lazy black-holing, depending on the
 * squeezeUpdFrames RTS flag.
 */
2442 threadPaused(StgTSO *tso)
2444 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2445 threadSqueezeStack(tso); /* does black holing too */
2447 threadLazyBlackHole(tso);