1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.8 1999/01/14 11:11:29 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
26 /* STATIC OBJECT LIST.
29 * We maintain a linked list of static objects that are still live.
30 * The requirements for this list are:
32 * - we need to scan the list while adding to it, in order to
33 * scavenge all the static objects (in the same way that
34 * breadth-first scavenging works for dynamic objects).
36 * - we need to be able to tell whether an object is already on
37 * the list, to break loops.
39 * Each static object has a "static link field", which we use for
40 * linking objects on to the list. We use a stack-type list, consing
41 * objects on the front as they are added (this means that the
42 * scavenge phase is depth-first, not breadth-first, but that
45 * A separate list is kept for objects that have been scavenged
46 * already - this is so that we can zero all the marks afterwards.
48 * An object is on the list if its static link field is non-zero; this
49 * means that we have to mark the end of the list with '1', not NULL.
51 * Extra notes for generational GC:
53 * Each generation has a static object list associated with it. When
54 * collecting generations up to N, we treat the static object lists
55 * from generations > N as roots.
57 * We build up a static object list while collecting generations 0..N,
58 * which is then appended to the static object list of generation N+1.
60 StgClosure* static_objects; /* live static objects */
61 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
63 /* N is the oldest generation being collected, where the generations
64 * are numbered starting at 0. A major GC (indicated by the major_gc
65 * flag) is when we're collecting all generations. We only attempt to
66 * deal with static objects and GC CAFs when doing a major GC.
69 static rtsBool major_gc;
71 /* Youngest generation that objects should be evacuated to in
72 * evacuate(). (Logically an argument to evacuate, but it's static
73 * a lot of the time so we optimise it into a global variable).
 * NOTE(review): the evac_gen declaration itself appears to be on an
 * elided line here - confirm against the full source.
79 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
80 static rtsBool weak_done; /* all done for this pass */
82 /* Flag indicating failure to evacuate an object to the desired
 * generation (evac_gen) - see the comments in evacuate().
85 static rtsBool failed_to_evac;
87 /* -----------------------------------------------------------------------------
88 Static function declarations
89 -------------------------------------------------------------------------- */
91 static StgClosure *evacuate(StgClosure *q);
92 static void zeroStaticObjectList(StgClosure* first_static);
93 static rtsBool traverse_weak_ptr_list(void);
94 static void zeroMutableList(StgMutClosure *first);
95 static void revertDeadCAFs(void);
97 static void scavenge_stack(StgPtr p, StgPtr stack_end);
98 static void scavenge_large(step *step);
99 static void scavenge(step *step);
100 static void scavenge_static(void);
101 static StgMutClosure *scavenge_mutable_list(StgMutClosure *p, nat gen);
104 static void gcCAFs(void);
107 /* -----------------------------------------------------------------------------
110 For garbage collecting generation N (and all younger generations):
112 - follow all pointers in the root set. the root set includes all
113 mutable objects in all steps in all generations.
115 - for each pointer, evacuate the object it points to into either
116 + to-space in the next higher step in that generation, if one exists,
117 + if the object's generation == N, then evacuate it to the next
118 generation if one exists, or else to-space in the current
120 + if the object's generation < N, then evacuate it to to-space
121 in the next generation.
123 - repeatedly scavenge to-space from each step in each generation
124 being collected until no more objects can be evacuated.
126 - free from-space in each step, and set from-space = to-space.
128 -------------------------------------------------------------------------- */
/* Main entry point of the collector.  get_roots is a callback supplied
 * by the caller that evacuates (via MarkRoot) every root it knows about.
 * NOTE(review): several lines of this function appear to be elided in
 * this copy (missing declarations, braces and labels such as 'loop:') -
 * compare with the full revision before making code changes.
 */
130 void GarbageCollect(void (*get_roots)(void))
134 lnat live, allocated, collected = 0;
138 CostCentreStack *prev_CCS;
141 /* tell the stats department that we've started a GC */
144 /* attribute any costs to CCS_GC */
150 /* We might have been called from Haskell land by _ccall_GC, in
151 * which case we need to call threadPaused() because the scheduler
152 * won't have done it.
154 if (CurrentTSO) { threadPaused(CurrentTSO); }
156 /* Approximate how much we allocated: number of blocks in the
157 * nursery + blocks allocated via allocate() - unused nursery blocks.
158 * This leaves a little slop at the end of each block, and doesn't
159 * take into account large objects (ToDo).
161 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
162 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
163 allocated -= BLOCK_SIZE_W;
166 /* Figure out which generation to collect
168 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
169 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
173 major_gc = (N == RtsFlags.GcFlags.generations-1);
175 /* check stack sanity *before* GC (ToDo: check all threads) */
176 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
177 IF_DEBUG(sanity, checkFreeListSanity());
179 /* Initialise the static object lists
181 static_objects = END_OF_STATIC_LIST;
182 scavenged_static_objects = END_OF_STATIC_LIST;
184 /* zero the mutable list for the oldest generation (see comment by
185 * zeroMutableList below).
188 zeroMutableList(generations[RtsFlags.GcFlags.generations-1].mut_list);
191 /* Initialise to-space in all the generations/steps that we're
194 for (g = 0; g <= N; g++) {
195 generations[g].mut_list = END_MUT_LIST;
197 for (s = 0; s < generations[g].n_steps; s++) {
198 /* generation 0, step 0 doesn't need to-space */
199 if (g == 0 && s == 0) { continue; }
200 /* Get a free block for to-space. Extra blocks will be chained on
204 step = &generations[g].steps[s];
205 ASSERT(step->gen->no == g);
206 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
207 bd->gen = &generations[g];
210 bd->evacuated = 1; /* it's a to-space block */
211 step->hp = bd->start;
212 step->hpLim = step->hp + BLOCK_SIZE_W;
215 step->to_blocks = 1; /* ???? */
216 step->scan = bd->start;
218 step->new_large_objects = NULL;
219 step->scavenged_large_objects = NULL;
220 /* mark the large objects as not evacuated yet */
221 for (bd = step->large_objects; bd; bd = bd->link) {
227 /* make sure the older generations have at least one block to
228 * allocate into (this makes things easier for copy(), see below).
230 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
231 for (s = 0; s < generations[g].n_steps; s++) {
232 step = &generations[g].steps[s];
233 if (step->hp_bd == NULL) {
235 bd->gen = &generations[g];
238 bd->evacuated = 0; /* *not* a to-space block */
239 step->hp = bd->start;
240 step->hpLim = step->hp + BLOCK_SIZE_W;
245 /* Set the scan pointer for older generations: remember we
246 * still have to scavenge objects that have been promoted. */
247 step->scan = step->hp;
248 step->scan_bd = step->hp_bd;
249 step->to_space = NULL;
251 step->new_large_objects = NULL;
252 step->scavenged_large_objects = NULL;
254 /* retain these so we can sanity-check later on */
255 step->old_scan = step->scan;
256 step->old_scan_bd = step->scan_bd;
261 /* -----------------------------------------------------------------------
262 * follow all the roots that the application knows about.
267 /* follow all the roots that we know about:
268 * - mutable lists from each generation > N
269 * we want to *scavenge* these roots, not evacuate them: they're not
270 * going to move in this GC.
271 * Also: do them in reverse generation order. This is because we
272 * often want to promote objects that are pointed to by older
273 * generations early, so we don't have to repeatedly copy them.
274 * Doing the generations in reverse order ensures that we don't end
275 * up in the situation where we want to evac an object to gen 3 and
276 * it has already been evaced to gen 2.
279 StgMutClosure *tmp, **pp;
280 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
281 /* the act of scavenging the mutable list for this generation
282 * might place more objects on the mutable list itself. So we
283 * place the current mutable list in a temporary, scavenge it,
284 * and then append it to the new list.
286 tmp = generations[g].mut_list;
287 generations[g].mut_list = END_MUT_LIST;
288 tmp = scavenge_mutable_list(tmp, g);
290 pp = &generations[g].mut_list;
291 while (*pp != END_MUT_LIST) {
292 pp = &(*pp)->mut_link;
297 /* And don't forget to mark the TSO if we got here direct from
300 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
303 /* Mark the weak pointer list, and prepare to detect dead weak
307 old_weak_ptr_list = weak_ptr_list;
308 weak_ptr_list = NULL;
309 weak_done = rtsFalse;
313 /* ToDo: To fix the caf leak, we need to make the commented out
314 * parts of this code do something sensible - as described in
317 extern void markHugsObjects(void);
319 /* ToDo: This (undefined) function should contain the scavenge
320 * loop immediately below this block of code - but I'm not sure
321 * enough of the details to do this myself.
323 scavengeEverything();
324 /* revert dead CAFs and update enteredCAFs list */
329 /* This will keep the CAFs and the attached BCOs alive
330 * but the values will have been reverted
332 scavengeEverything();
337 /* -------------------------------------------------------------------------
338 * Repeatedly scavenge all the areas we know about until there's no
339 * more scavenging to be done.
346 /* scavenge static objects */
347 if (major_gc && static_objects != END_OF_STATIC_LIST) {
351 /* When scavenging the older generations: Objects may have been
352 * evacuated from generations <= N into older generations, and we
353 * need to scavenge these objects. We're going to try to ensure that
354 * any evacuations that occur move the objects into at least the
355 * same generation as the object being scavenged, otherwise we
356 * have to create new entries on the mutable list for the older
360 /* scavenge each step in generations 0..maxgen */
363 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
364 for (s = 0; s < generations[gen].n_steps; s++) {
365 step = &generations[gen].steps[s];
367 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
371 if (step->new_large_objects != NULL) {
372 scavenge_large(step);
378 if (flag) { goto loop; }
380 /* must be last... */
381 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
386 /* run through all the generations/steps and tidy up
388 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
391 generations[g].collections++; /* for stats */
394 for (s = 0; s < generations[g].n_steps; s++) {
396 step = &generations[g].steps[s];
398 if (!(g == 0 && s == 0)) {
399 /* Tidy the end of the to-space chains */
400 step->hp_bd->free = step->hp;
401 step->hp_bd->link = NULL;
404 /* for generations we collected... */
407 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
409 /* free old memory and shift to-space into from-space for all
410 * the collected steps (except the allocation area). These
411 * freed blocks will probably be quickly recycled.
413 if (!(g == 0 && s == 0)) {
414 freeChain(step->blocks);
415 step->blocks = step->to_space;
416 step->n_blocks = step->to_blocks;
417 step->to_space = NULL;
419 for (bd = step->blocks; bd != NULL; bd = bd->link) {
420 bd->evacuated = 0; /* now from-space */
424 /* LARGE OBJECTS. The current live large objects are chained on
425 * scavenged_large, having been moved during garbage
426 * collection from large_objects. Any objects left on
427 * large_objects list are therefore dead, so we free them here.
429 for (bd = step->large_objects; bd != NULL; bd = next) {
434 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
437 step->large_objects = step->scavenged_large_objects;
439 /* Set the maximum blocks for this generation,
440 * using an arbitrary factor of the no. of blocks in step 0.
443 generation *gen = &generations[g];
445 stg_max(gen->steps[s].n_blocks * 2,
446 RtsFlags.GcFlags.minAllocAreaSize * 4);
447 if (gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
448 gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
449 if (((int)gen->max_blocks - (int)gen->steps[0].n_blocks) <
450 (RtsFlags.GcFlags.pcFreeHeap *
451 RtsFlags.GcFlags.maxHeapSize / 200)) {
457 /* for older generations... */
460 /* For older generations, we need to append the
461 * scavenged_large_object list (i.e. large objects that have been
462 * promoted during this GC) to the large_object list for that step.
464 for (bd = step->scavenged_large_objects; bd; bd = next) {
467 dbl_link_onto(bd, &step->large_objects);
470 /* add the new blocks we promoted during this GC */
471 step->n_blocks += step->to_blocks;
476 /* revert dead CAFs and update enteredCAFs list */
479 /* mark the garbage collected CAFs as dead */
481 if (major_gc) { gcCAFs(); }
484 /* zero the scavenged static object list */
486 zeroStaticObjectList(scavenged_static_objects);
/* Reset the nursery (generation 0, step 0) for the next mutator run. */
491 for (bd = g0s0->blocks; bd; bd = bd->link) {
492 bd->free = bd->start;
493 ASSERT(bd->gen == g0);
494 ASSERT(bd->step == g0s0);
496 current_nursery = g0s0->blocks;
499 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
500 for (s = 0; s < generations[g].n_steps; s++) {
501 /* approximate amount of live data (doesn't take into account slop
502 * at end of each block). ToDo: this more accurately.
504 if (g == 0 && s == 0) { continue; }
505 step = &generations[g].steps[s];
506 live += step->n_blocks * BLOCK_SIZE_W +
507 ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
511 /* Free the small objects allocated via allocate(), since this will
512 * all have been copied into G0S1 now.
514 if (small_alloc_list != NULL) {
515 freeChain(small_alloc_list);
517 small_alloc_list = NULL;
519 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
521 /* start any pending finalisers */
522 scheduleFinalisers(old_weak_ptr_list);
524 /* check sanity after GC */
526 for (g = 0; g <= N; g++) {
527 for (s = 0; s < generations[g].n_steps; s++) {
528 if (g == 0 && s == 0) { continue; }
529 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
530 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
533 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
534 for (s = 0; s < generations[g].n_steps; s++) {
535 IF_DEBUG(sanity, checkHeap(generations[g].steps[s].old_scan_bd,
536 generations[g].steps[s].old_scan));
537 IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
540 IF_DEBUG(sanity, checkFreeListSanity());
543 IF_DEBUG(gc, stat_describe_gens());
546 /* symbol-table based profiling */
547 /* heapCensus(to_space); */ /* ToDo */
550 /* restore enclosing cost centre */
555 /* check for memory leaks if sanity checking is on */
556 IF_DEBUG(sanity, memInventory());
558 /* ok, GC over: tell the stats department what happened. */
559 stat_endGC(allocated, collected, live, N);
562 /* -----------------------------------------------------------------------------
565 traverse_weak_ptr_list is called possibly many times during garbage
566 collection. It returns a flag indicating whether it did any work
567 (i.e. called evacuate on any live pointers).
569 Invariant: traverse_weak_ptr_list is called when the heap is in an
570 idempotent state. That means that there are no pending
571 evacuate/scavenge operations. This invariant helps the weak
572 pointer code decide which weak pointers are dead - if there are no
573 new live weak pointers, then all the currently unreachable ones are
576 For generational GC: we just don't try to finalise weak pointers in
577 older generations than the one we're collecting. This could
578 probably be optimised by keeping per-generation lists of weak
579 pointers, but for a few weak pointers this scheme will work.
580 -------------------------------------------------------------------------- */
/* Returns rtsTrue iff it evacuated anything on this pass (see header
 * comment above).  NOTE(review): the return-type line and some case
 * labels appear elided in this copy.
 */
583 traverse_weak_ptr_list(void)
585 StgWeak *w, **last_w, *next_w;
587 const StgInfoTable *info;
588 rtsBool flag = rtsFalse;
590 if (weak_done) { return rtsFalse; }
592 /* doesn't matter where we evacuate values/finalisers to, since
593 * these pointers are treated as roots (iff the keys are alive).
597 last_w = &old_weak_ptr_list;
598 for (w = old_weak_ptr_list; w; w = next_w) {
601 /* ignore weak pointers in older generations */
602 if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
603 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
604 /* remove this weak ptr from the old_weak_ptr list */
606 /* and put it on the new weak ptr list */
608 w->link = weak_ptr_list;
614 info = get_itbl(target);
615 switch (info->type) {
620 case IND_OLDGEN: /* rely on compatible layout with StgInd */
621 case IND_OLDGEN_PERM:
622 /* follow indirections */
623 target = ((StgInd *)target)->indirectee;
627 /* If key is alive, evacuate value and finaliser and
628 * place weak ptr on new weak ptr list.
630 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
631 w->key = ((StgEvacuated *)target)->evacuee;
632 w->value = evacuate(w->value);
633 w->finaliser = evacuate(w->finaliser);
635 /* remove this weak ptr from the old_weak_ptr list */
638 /* and put it on the new weak ptr list */
640 w->link = weak_ptr_list;
645 default: /* key is dead */
652 /* If we didn't make any changes, then we can go round and kill all
653 * the dead weak pointers. The old_weak_ptr list is used as a list
654 * of pending finalisers later on.
656 if (flag == rtsFalse) {
657 for (w = old_weak_ptr_list; w; w = w->link) {
658 w->value = evacuate(w->value);
659 w->finaliser = evacuate(w->finaliser);
/* MarkRoot: evacuate a single root closure and return its new address.
 * This is the callback handed to get_roots() by GarbageCollect().
 */
668 MarkRoot(StgClosure *root)
670 root = evacuate(root);
/* addBlock: chain a fresh block onto the given step's to-space and
 * reset hp/hpLim to allocate from it.  The to_blocks/n_blocks
 * accounting appears to be on elided lines (NOTE(review): confirm).
 */
674 static inline void addBlock(step *step)
676 bdescr *bd = allocBlock();
680 if (step->gen->no <= N) {
686 step->hp_bd->free = step->hp;
687 step->hp_bd->link = bd;
688 step->hp = bd->start;
689 step->hpLim = step->hp + BLOCK_SIZE_W;
/* copy: allocate 'size' words in the destination step's to-space
 * (promoting to evac_gen's step 0 if the natural destination is too
 * young) and copy the closure 'src' there word by word.  Returns the
 * new address; the caller is responsible for forwarding (upd_evacuee).
 */
694 static __inline__ StgClosure *
695 copy(StgClosure *src, W_ size, bdescr *bd)
700 /* Find out where we're going, using the handy "to" pointer in
701 * the step of the source object. If it turns out we need to
702 * evacuate to an older generation, adjust it here (see comment
706 if (step->gen->no < evac_gen) {
707 step = &generations[evac_gen].steps[0];
710 /* chain a new block onto the to-space for the destination step if
713 if (step->hp + size >= step->hpLim) {
719 for(to = dest, from = (P_)src; size>0; --size) {
722 return (StgClosure *)dest;
/* upd_evacuee: overwrite the from-space object p with an EVACUATED
 * forwarding object whose evacuee field points at dest (the assignment
 * of q->evacuee appears to be on an elided line).
 */
725 static __inline__ void
726 upd_evacuee(StgClosure *p, StgClosure *dest)
728 StgEvacuated *q = (StgEvacuated *)p;
730 SET_INFO(q,&EVACUATED_info);
734 /* -----------------------------------------------------------------------------
735 Evacuate a mutable object
737 If we evacuate a mutable object to an old generation, cons the
738 object onto the older generation's mutable list.
739 -------------------------------------------------------------------------- */
/* NOTE(review): 'bd' is presumably Bdescr((P_)c), assigned on an
 * elided line - confirm against the full source.
 */
742 evacuate_mutable(StgMutClosure *c)
747 if (bd->gen->no > 0) {
748 c->mut_link = bd->gen->mut_list;
749 bd->gen->mut_list = c;
753 /* -----------------------------------------------------------------------------
754 Evacuate a large object
756 This just consists of removing the object from the (doubly-linked)
757 large_alloc_list, and linking it on to the (singly-linked)
758 new_large_objects list, from where it will be scavenged later.
760 Convention: bd->evacuated is /= 0 for a large object that has been
761 evacuated, or 0 otherwise.
762 -------------------------------------------------------------------------- */
765 evacuate_large(StgPtr p, rtsBool mutable)
767 bdescr *bd = Bdescr(p);
770 /* should point to the beginning of the block */
771 ASSERT(((W_)p & BLOCK_MASK) == 0);
773 /* already evacuated? */
775 /* Don't forget to set the failed_to_evac flag if we didn't get
776 * the desired destination (see comments in evacuate()).
778 if (bd->gen->no < evac_gen) {
779 failed_to_evac = rtsTrue;
785 /* remove from large_object list */
787 bd->back->link = bd->link;
788 } else { /* first object in the list */
789 step->large_objects = bd->link;
792 bd->link->back = bd->back;
795 /* link it on to the evacuated large object list of the destination step
798 if (step->gen->no < evac_gen) {
799 step = &generations[evac_gen].steps[0];
804 bd->link = step->new_large_objects;
805 step->new_large_objects = bd;
/* if the caller said the object is mutable, record it on the
 * destination generation's mutable list too */
809 evacuate_mutable((StgMutClosure *)p);
813 /* -----------------------------------------------------------------------------
814 Adding a MUT_CONS to an older generation.
816 This is necessary from time to time when we end up with an
817 old-to-new generation pointer in a non-mutable object. We defer
818 the promotion until the next GC.
819 -------------------------------------------------------------------------- */
/* mkMutCons: allocate a MUT_CONS wrapper for 'ptr' directly in 'gen's
 * step 0 and chain it onto that generation's mutable list.  Returns
 * the new MUT_CONS closure.
 */
822 mkMutCons(StgClosure *ptr, generation *gen)
827 step = &gen->steps[0];
829 /* chain a new block onto the to-space for the destination step if
832 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
836 q = (StgMutVar *)step->hp;
837 step->hp += sizeofW(StgMutVar);
839 SET_HDR(q,&MUT_CONS_info,CCS_GC);
841 evacuate_mutable((StgMutClosure *)q);
843 return (StgClosure *)q;
846 /* -----------------------------------------------------------------------------
849 This is called (eventually) for every live object in the system.
851 The caller to evacuate specifies a desired generation in the
852 evac_gen global variable. The following conditions apply to
853 evacuating an object which resides in generation M when we're
854 collecting up to generation N
858 else evac to step->to
860 if M < evac_gen evac to evac_gen, step 0
862 if the object is already evacuated, then we check which generation
865 if M >= evac_gen do nothing
866 if M < evac_gen set failed_to_evac flag to indicate that we
867 didn't manage to evacuate this object into evac_gen.
869 -------------------------------------------------------------------------- */
/* NOTE(review): many case labels, breaks and braces of the big switch
 * below appear elided in this copy - compare with the full revision.
 */
873 evacuate(StgClosure *q)
877 const StgInfoTable *info;
880 if (!LOOKS_LIKE_STATIC(q)) {
882 if (bd->gen->no > N) {
883 /* Can't evacuate this object, because it's in a generation
884 * older than the ones we're collecting. Let's hope that it's
885 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
887 if (bd->gen->no < evac_gen) {
889 failed_to_evac = rtsTrue;
895 /* make sure the info pointer is into text space */
896 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
897 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
900 switch (info -> type) {
903 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)),bd);
909 to = copy(q,sizeW_fromITBL(info),bd);
911 evacuate_mutable((StgMutClosure *)to);
918 case IND_OLDGEN_PERM:
923 to = copy(q,sizeW_fromITBL(info),bd);
929 to = copy(q,BLACKHOLE_sizeW(),bd);
/* THUNK_SELECTOR: try to perform the selection eagerly so the
 * selectee (and the whole chain it retains) can die. */
935 const StgInfoTable* selectee_info;
936 StgClosure* selectee = ((StgSelector*)q)->selectee;
939 selectee_info = get_itbl(selectee);
940 switch (selectee_info->type) {
944 StgNat32 offset = info->layout.selector_offset;
946 /* check that the size is in range */
948 (StgNat32)(selectee_info->layout.payload.ptrs +
949 selectee_info->layout.payload.nptrs));
951 /* perform the selection! */
952 q = selectee->payload[offset];
954 /* if we're already in to-space, there's no need to continue
955 * with the evacuation, just update the source address with
956 * a pointer to the (evacuated) constructor field.
958 if (IS_USER_PTR(q)) {
959 bdescr *bd = Bdescr((P_)q);
961 if (bd->gen->no < evac_gen) {
962 failed_to_evac = rtsTrue;
968 /* otherwise, carry on and evacuate this constructor field,
969 * (but not the constructor itself)
978 case IND_OLDGEN_PERM:
979 selectee = stgCast(StgInd *,selectee)->indirectee;
983 selectee = stgCast(StgCAF *,selectee)->value;
987 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
993 /* aargh - do recursively???? */
997 /* not evaluated yet */
1001 barf("evacuate: THUNK_SELECTOR: strange selectee");
1004 to = copy(q,THUNK_SELECTOR_sizeW(),bd);
1010 /* follow chains of indirections, don't evacuate them */
1011 q = ((StgInd*)q)->indirectee;
1014 /* ToDo: optimise STATIC_LINK for known cases.
1015 - FUN_STATIC : payload[0]
1016 - THUNK_STATIC : payload[1]
1017 - IND_STATIC : payload[1]
1021 if (info->srt_len == 0) { /* small optimisation */
1027 /* don't want to evacuate these, but we do want to follow pointers
1028 * from SRTs - see scavenge_static.
1031 /* put the object on the static list, if necessary.
1033 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1034 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1035 static_objects = (StgClosure *)q;
1039 case CONSTR_INTLIKE:
1040 case CONSTR_CHARLIKE:
1041 case CONSTR_NOCAF_STATIC:
1042 /* no need to put these on the static linked list, they don't need
1057 /* shouldn't see these */
1058 barf("evacuate: stack frame\n");
1062 /* these are special - the payload is a copy of a chunk of stack,
1064 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)),bd);
1069 /* Already evacuated, just return the forwarding address.
1070 * HOWEVER: if the requested destination generation (evac_gen) is
1071 * older than the actual generation (because the object was
1072 * already evacuated to a younger generation) then we have to
1073 * set the failed_to_evac flag to indicate that we couldn't
1074 * manage to promote the object to the desired generation.
1076 if (evac_gen > 0) { /* optimisation */
1077 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1078 if (Bdescr((P_)p)->gen->no < evac_gen) {
1079 /* fprintf(stderr,"evac failed!\n");*/
1080 failed_to_evac = rtsTrue;
1083 return ((StgEvacuated*)q)->evacuee;
1088 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
1090 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1091 evacuate_large((P_)q, rtsFalse);
1094 /* just copy the block */
1095 to = copy(q,size,bd);
1102 case MUT_ARR_PTRS_FROZEN:
1104 nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
1106 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1107 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1110 /* just copy the block */
1111 to = copy(q,size,bd);
1113 if (info->type == MUT_ARR_PTRS) {
1114 evacuate_mutable((StgMutClosure *)to);
1122 StgTSO *tso = stgCast(StgTSO *,q);
1123 nat size = tso_sizeW(tso);
1126 /* Large TSOs don't get moved, so no relocation is required.
1128 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1129 evacuate_large((P_)q, rtsFalse);
1130 tso->mut_link = NULL; /* see below */
1133 /* To evacuate a small TSO, we need to relocate the update frame
1137 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),bd);
1139 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1141 /* relocate the stack pointers... */
1142 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1143 new_tso->sp = (StgPtr)new_tso->sp + diff;
1144 new_tso->splim = (StgPtr)new_tso->splim + diff;
1146 relocate_TSO(tso, new_tso);
1147 upd_evacuee(q,(StgClosure *)new_tso);
1149 /* don't evac_mutable - these things are marked mutable as
1150 * required. We *do* need to zero the mut_link field, though:
1151 * this TSO might have been on the mutable list for this
1152 * generation, but we're collecting this generation anyway so
1153 * we didn't follow the mutable list.
1155 new_tso->mut_link = NULL;
1157 return (StgClosure *)new_tso;
1163 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
1167 barf("evacuate: strange closure type");
1173 /* -----------------------------------------------------------------------------
1174 relocate_TSO is called just after a TSO has been copied from src to
1175 dest. It adjusts the update frame list for the new location.
1176 -------------------------------------------------------------------------- */
/* Walks the update-frame chain on dest's (already-copied) stack and
 * shifts every frame-link pointer by the src->dest displacement.
 */
1179 relocate_TSO(StgTSO *src, StgTSO *dest)
1186 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
1190 while ((P_)su < dest->stack + dest->stack_size) {
1191 switch (get_itbl(su)->type) {
1193 /* GCC actually manages to common up these three cases! */
1196 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1201 cf = (StgCatchFrame *)su;
1202 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1207 sf = (StgSeqFrame *)su;
1208 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1217 barf("relocate_TSO");
/* scavenge_srt: evacuate every closure referenced from the static
 * reference table (SRT) attached to the given info table.
 */
1226 scavenge_srt(const StgInfoTable *info)
1228 StgClosure **srt, **srt_end;
1230 /* evacuate the SRT. If srt_len is zero, then there isn't an
1231 * srt field in the info table. That's ok, because we'll
1232 * never dereference it.
1234 srt = stgCast(StgClosure **,info->srt);
1235 srt_end = srt + info->srt_len;
1236 for (; srt < srt_end; srt++) {
1241 /* -----------------------------------------------------------------------------
1242 Scavenge a given step until there are no more objects in this step
1245 evac_gen is set by the caller to be either zero (for a step in a
1246 generation < N) or G where G is the generation of the step being
1249 We sometimes temporarily change evac_gen back to zero if we're
1250 scavenging a mutable object where early promotion isn't such a good
1252 -------------------------------------------------------------------------- */
/* NOTE(review): several case labels, breaks and the loop-cursor
 * declarations (bd, p, q, next) appear elided in this copy.
 */
1256 scavenge(step *step)
1259 const StgInfoTable *info;
1261 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1266 failed_to_evac = rtsFalse;
1268 /* scavenge phase - standard breadth-first scavenging of the
1272 while (bd != step->hp_bd || p < step->hp) {
1274 /* If we're at the end of this block, move on to the next block */
1275 if (bd != step->hp_bd && p == bd->free) {
1281 q = p; /* save ptr to object */
1283 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1284 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1286 info = get_itbl((StgClosure *)p);
1287 switch (info -> type) {
1291 StgBCO* bco = stgCast(StgBCO*,p);
1293 for (i = 0; i < bco->n_ptrs; i++) {
1294 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1296 p += bco_sizeW(bco);
1301 /* treat MVars specially, because we don't want to evacuate the
1302 * mut_link field in the middle of the closure.
1305 StgMVar *mvar = ((StgMVar *)p);
1307 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1308 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1309 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1310 p += sizeofW(StgMVar);
1311 evac_gen = saved_evac_gen;
1324 case IND_OLDGEN_PERM:
/* generic case: evacuate all pointer fields, then skip non-pointers */
1330 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1331 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1332 (StgClosure *)*p = evacuate((StgClosure *)*p);
1334 p += info->layout.payload.nptrs;
1339 /* ignore MUT_CONSs */
1340 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
1342 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1343 evac_gen = saved_evac_gen;
1345 p += sizeofW(StgMutVar);
1351 StgBlackHole *bh = (StgBlackHole *)p;
1352 (StgClosure *)bh->blocking_queue =
1353 evacuate((StgClosure *)bh->blocking_queue);
1354 p += BLACKHOLE_sizeW();
1358 case THUNK_SELECTOR:
1360 StgSelector *s = (StgSelector *)p;
1361 s->selectee = evacuate(s->selectee);
1362 p += THUNK_SELECTOR_sizeW();
1368 barf("scavenge:IND???\n");
1370 case CONSTR_INTLIKE:
1371 case CONSTR_CHARLIKE:
1373 case CONSTR_NOCAF_STATIC:
1377 /* Shouldn't see a static object here. */
1378 barf("scavenge: STATIC object\n");
1390 /* Shouldn't see stack frames here. */
1391 barf("scavenge: stack frame\n");
1393 case AP_UPD: /* same as PAPs */
1395 /* Treat a PAP just like a section of stack, not forgetting to
1396 * evacuate the function pointer too...
1399 StgPAP* pap = stgCast(StgPAP*,p);
1401 pap->fun = evacuate(pap->fun);
1402 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1403 p += pap_sizeW(pap);
1409 /* nothing to follow */
1410 p += arr_words_sizeW(stgCast(StgArrWords*,p));
1414 /* follow everything */
1418 evac_gen = 0; /* repeatedly mutable */
1419 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1420 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1421 (StgClosure *)*p = evacuate((StgClosure *)*p);
1423 evac_gen = saved_evac_gen;
1427 case MUT_ARR_PTRS_FROZEN:
1428 /* follow everything */
1430 StgPtr start = p, next;
1432 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1433 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1434 (StgClosure *)*p = evacuate((StgClosure *)*p);
1436 if (failed_to_evac) {
1437 /* we can do this easier... */
1438 evacuate_mutable((StgMutClosure *)start);
1439 failed_to_evac = rtsFalse;
1450 /* chase the link field for any TSOs on the same queue */
1451 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1452 /* scavenge this thread's stack */
1453 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1454 evac_gen = saved_evac_gen;
1455 p += tso_sizeW(tso);
1462 barf("scavenge: unimplemented/strange closure type\n");
1468 /* If we didn't manage to promote all the objects pointed to by
1469 * the current object, then we have to designate this object as
1470 * mutable (because it contains old-to-new generation pointers).
1472 if (failed_to_evac) {
1473 mkMutCons((StgClosure *)q, &generations[evac_gen]);
1474 failed_to_evac = rtsFalse;
1482 /* -----------------------------------------------------------------------------
1483 Scavenge one object.
1485 This is used for objects that are temporarily marked as mutable
1486 because they contain old-to-new generation pointers. Only certain
1487 objects can have this property.
1488 -------------------------------------------------------------------------- */
/* NOTE(review): the return-type line is not visible in this excerpt; from
 * the `== rtsTrue' comparison at the call site in scavenge_mutable_list it
 * presumably returns rtsBool — whether some pointee could not be promoted
 * (i.e. the saved value of the global failed_to_evac flag).
 */
1490 scavenge_one(StgPtr p)
/* sanity: p must carry a plausible GHC (or Hugs constructor) info pointer */
1495 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1496 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1498 info = get_itbl((StgClosure *)p);
1500 switch (info -> type) {
1508 case IND_OLDGEN_PERM:
/* closures with the standard pointers-first layout: evacuate every
 * pointer word in the payload */
1514 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1515 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1516 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* black hole: only the blocking queue needs following */
1524 StgBlackHole *bh = (StgBlackHole *)p;
1525 (StgClosure *)bh->blocking_queue =
1526 evacuate((StgClosure *)bh->blocking_queue);
1530 case THUNK_SELECTOR:
1532 StgSelector *s = (StgSelector *)p;
1533 s->selectee = evacuate(s->selectee);
1537 case AP_UPD: /* same as PAPs */
1539 /* Treat a PAP just like a section of stack, not forgetting to
1540 * evacuate the function pointer too...
1543 StgPAP* pap = stgCast(StgPAP*,p);
1545 pap->fun = evacuate(pap->fun);
1546 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
1551 /* This might happen if for instance a MUT_CONS was pointing to a
1552 * THUNK which has since been updated. The IND_OLDGEN will
1553 * be on the mutable list anyway, so we don't need to do anything
1559 barf("scavenge_one: strange object");
/* report-and-reset: hand the failure flag back to the caller and clear
 * the global for the next use */
1562 no_luck = failed_to_evac;
1563 failed_to_evac = rtsFalse;
1568 /* -----------------------------------------------------------------------------
1569 Scavenging mutable lists.
1571 We treat the mutable list of each generation > N (i.e. all the
1572 generations older than the one being collected) as roots. We also
1573 remove non-mutable objects from the mutable list at this point.
1574 -------------------------------------------------------------------------- */
/* Walks the singly-linked mut_link list starting at p.  The `prev' pointer
 * implements the classic unlink idiom: setting prev = &p->mut_link keeps
 * the current object on the list, while *prev = p->mut_link splices it out.
 * NOTE(review): several interior lines (declarations of start/end/q, some
 * case labels, the return) are elided in this excerpt.
 */
1576 static StgMutClosure *
1577 scavenge_mutable_list(StgMutClosure *p, nat gen)
1580 StgMutClosure *start;
1581 StgMutClosure **prev;
/* cleared here so each object's scavenge starts with a fresh flag */
1588 failed_to_evac = rtsFalse;
1590 for (; p != END_MUT_LIST; p = *prev) {
1592 /* make sure the info pointer is into text space */
1593 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1594 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1597 switch(info->type) {
1599 case MUT_ARR_PTRS_FROZEN:
1600 /* remove this guy from the mutable list, but follow the ptrs
1601 * anyway (and make sure they get promoted to this gen).
1606 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1608 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1609 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* couldn't promote everything -> must stay on the mutable list */
1613 if (failed_to_evac) {
1614 failed_to_evac = rtsFalse;
1615 prev = &p->mut_link;
1617 *prev = p->mut_link;
1623 /* follow everything */
1624 prev = &p->mut_link;
1628 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1629 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
1630 (StgClosure *)*q = evacuate((StgClosure *)*q);
1636 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
1637 * it from the mutable list if possible by promoting whatever it
1640 if (p->header.info == &MUT_CONS_info) {
1642 if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
1643 /* didn't manage to promote everything, so leave the
1644 * MUT_CONS on the list.
1646 prev = &p->mut_link;
1648 *prev = p->mut_link;
/* genuine MUT_VAR: always mutable, stays on the list */
1652 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1653 prev = &p->mut_link;
1658 /* follow ptrs and remove this from the mutable list */
1660 StgTSO *tso = (StgTSO *)p;
1662 /* Don't bother scavenging if this thread is dead
1664 if (!(tso->whatNext == ThreadComplete ||
1665 tso->whatNext == ThreadKilled)) {
1666 /* Don't need to chase the link field for any TSOs on the
1667 * same queue. Just scavenge this thread's stack
1669 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1672 /* Don't take this TSO off the mutable list - it might still
1673 * point to some younger objects (because we set evac_gen to 0
1676 prev = &tso->mut_link;
1681 case IND_OLDGEN_PERM:
1683 /* Try to pull the indirectee into this generation, so we can
1684 * remove the indirection from the mutable list.
1687 ((StgIndOldGen *)p)->indirectee =
1688 evacuate(((StgIndOldGen *)p)->indirectee);
1691 if (failed_to_evac) {
1692 failed_to_evac = rtsFalse;
1693 prev = &p->mut_link;
1695 *prev = p->mut_link;
1696 /* the mut_link field of an IND_STATIC is overloaded as the
1697 * static link field too (it just so happens that we don't need
1698 * both at the same time), so we need to NULL it out when
1699 * removing this object from the mutable list because the static
1700 * link fields are all assumed to be NULL before doing a major
1708 /* shouldn't have anything else on the mutables list */
1709 barf("scavenge_mutable_object: non-mutable object?");
/* Scavenge the live static objects accumulated on the global
 * static_objects list, moving each onto scavenged_static_objects as it
 * is processed.  Evacuation of new pointees may push more objects onto
 * static_objects, so the loop re-reads the global each iteration.
 * NOTE(review): the return type and some interior lines are elided in
 * this excerpt.
 */
1716 scavenge_static(void)
1718 StgClosure* p = static_objects;
1719 const StgInfoTable *info;
1721 /* Always evacuate straight to the oldest generation for static
1723 evac_gen = oldest_gen->no;
1725 /* keep going until we've scavenged all the objects on the linked
1727 while (p != END_OF_STATIC_LIST) {
1731 /* make sure the info pointer is into text space */
1732 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1733 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1735 /* Take this object *off* the static_objects list,
1736 * and put it on the scavenged_static_objects list.
/* ordering matters: unlink BEFORE evacuating, because evacuate() may
 * cons new objects onto static_objects */
1738 static_objects = STATIC_LINK(info,p);
1739 STATIC_LINK(info,p) = scavenged_static_objects;
1740 scavenged_static_objects = p;
1742 switch (info -> type) {
1746 StgInd *ind = (StgInd *)p;
1747 ind->indirectee = evacuate(ind->indirectee);
1749 /* might fail to evacuate it, in which case we have to pop it
1750 * back on the mutable list (and take it off the
1751 * scavenged_static list because the static link and mut link
1752 * pointers are one and the same).
1754 if (failed_to_evac) {
1755 failed_to_evac = rtsFalse;
1756 scavenged_static_objects = STATIC_LINK(info,p);
1757 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
1758 oldest_gen->mut_list = (StgMutClosure *)ind;
/* constructor-style static closure: walk the pointer payload */
1772 next = (P_)p->payload + info->layout.payload.ptrs;
1773 /* evacuate the pointers */
1774 for (q = (P_)p->payload; q < next; q++) {
1775 (StgClosure *)*q = evacuate((StgClosure *)*q);
1781 barf("scavenge_static");
/* only IND_STATIC (handled above) may set failed_to_evac here */
1784 ASSERT(failed_to_evac == rtsFalse);
1786 /* get the next static object from the list. Remember, there might
1787 * be more stuff on this list now that we've done some evacuating!
1788 * (static_objects is a global)
1794 /* -----------------------------------------------------------------------------
1795 scavenge_stack walks over a section of stack and evacuates all the
1796 objects pointed to by it. We can use the same code for walking
1797 PAPs, since these are just sections of copied stack.
1798 -------------------------------------------------------------------------- */
/* NOTE(review): return type and a number of interior lines (declarations
 * of q/bitmap/to, tag-skip arithmetic, SRT-following code) are elided in
 * this excerpt.
 */
1801 scavenge_stack(StgPtr p, StgPtr stack_end)
1804 const StgInfoTable* info;
1808 * Each time around this loop, we are looking at a chunk of stack
1809 * that starts with either a pending argument section or an
1810 * activation record.
1813 while (p < stack_end) {
1814 q = *stgCast(StgPtr*,p);
1816 /* If we've got a tag, skip over that many words on the stack */
1817 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1822 /* Is q a pointer to a closure?
/* heuristic: anything that doesn't look like an info pointer is
 * treated as a closure pointer (pending argument) */
1824 if (! LOOKS_LIKE_GHC_INFO(q)) {
1827 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1828 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1830 /* otherwise, must be a pointer into the allocation space.
1834 (StgClosure *)*p = evacuate((StgClosure *)q);
1840 * Otherwise, q must be the info pointer of an activation
1841 * record. All activation records have 'bitmap' style layout
1844 info = get_itbl(stgCast(StgClosure*,p));
1846 switch (info->type) {
1848 /* Dynamic bitmap: the mask is stored on the stack */
1850 bitmap = stgCast(StgRetDyn*,p)->liveness;
1851 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1854 /* probably a slow-entry point return address: */
1860 /* Specialised code for update frames, since they're so common.
1861 * We *know* the updatee points to a BLACKHOLE or CAF_BLACKHOLE,
1862 * so just inline the code to evacuate it here.
1866 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1868 StgClosureType type = get_itbl(frame->updatee)->type;
1870 p += sizeofW(StgUpdateFrame);
/* already copied this GC: just fetch the forwarding pointer */
1871 if (type == EVACUATED) {
1872 frame->updatee = evacuate(frame->updatee);
1875 bdescr *bd = Bdescr((P_)frame->updatee);
1876 ASSERT(type == BLACKHOLE || type == CAF_BLACKHOLE);
/* updatee already lives in a generation we aren't collecting */
1877 if (bd->gen->no >= evac_gen && bd->gen->no > N) { continue; }
1878 to = copy(frame->updatee, BLACKHOLE_sizeW(), bd);
1879 upd_evacuee(frame->updatee,to);
1880 frame->updatee = to;
1885 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
1892 bitmap = info->layout.bitmap;
/* bitmap convention: a CLEAR bit marks a pointer word to follow */
1895 while (bitmap != 0) {
1896 if ((bitmap & 1) == 0) {
1897 (StgClosure *)*p = evacuate((StgClosure *)*p);
1900 bitmap = bitmap >> 1;
1907 /* large bitmap (> 32 entries) */
1912 StgLargeBitmap *large_bitmap;
1915 large_bitmap = info->layout.large_bitmap;
1918 for (i=0; i<large_bitmap->size; i++) {
1919 bitmap = large_bitmap->bitmap[i];
1920 q = p + sizeof(W_) * 8;
1921 while (bitmap != 0) {
1922 if ((bitmap & 1) == 0) {
1923 (StgClosure *)*p = evacuate((StgClosure *)*p);
1926 bitmap = bitmap >> 1;
/* pad out to the next bitmap word, following remaining pointers */
1928 if (i+1 < large_bitmap->size) {
1930 (StgClosure *)*p = evacuate((StgClosure *)*p);
1936 /* and don't forget to follow the SRT */
1941 barf("scavenge_stack: weird activation record found on stack.\n");
1946 /*-----------------------------------------------------------------------------
1947 scavenge the large object list.
1949 evac_gen set by caller; similar games played with evac_gen as with
1950 scavenge() - see comment at the top of scavenge(). Most large
1951 objects are (repeatedly) mutable, so most of the time evac_gen will
1953 --------------------------------------------------------------------------- */
/* NOTE(review): return type, bd/p/next/i declarations and several case
 * labels are elided in this excerpt.
 */
1956 scavenge_large(step *step)
1960 const StgInfoTable* info;
1961 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1963 evac_gen = 0; /* most objects are mutable */
1964 bd = step->new_large_objects;
/* re-read the list head each iteration: evacuating below may push new
 * large objects onto new_large_objects (it is used as a stack) */
1966 for (; bd != NULL; bd = step->new_large_objects) {
1968 /* take this object *off* the large objects list and put it on
1969 * the scavenged large objects list. This is so that we can
1970 * treat new_large_objects as a stack and push new objects on
1971 * the front when evacuating.
1973 step->new_large_objects = bd->link;
1974 dbl_link_onto(bd, &step->scavenged_large_objects);
1977 info = get_itbl(stgCast(StgClosure*,p));
1979 switch (info->type) {
1981 /* only certain objects can be "large"... */
1985 /* nothing to follow */
1989 /* follow everything */
1993 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1994 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1995 (StgClosure *)*p = evacuate((StgClosure *)*p);
2000 case MUT_ARR_PTRS_FROZEN:
2001 /* follow everything */
2003 StgPtr start = p, next;
2005 evac_gen = saved_evac_gen; /* not really mutable */
2006 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2007 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2008 (StgClosure *)*p = evacuate((StgClosure *)*p);
/* frozen array still holds old-to-new pointers: re-register it as
 * mutable so those get tracked */
2011 if (failed_to_evac) {
2012 evacuate_mutable((StgMutClosure *)start);
2019 StgBCO* bco = stgCast(StgBCO*,p);
2021 evac_gen = saved_evac_gen;
2022 for (i = 0; i < bco->n_ptrs; i++) {
2023 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2034 /* chase the link field for any TSOs on the same queue */
2035 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2036 /* scavenge this thread's stack */
2037 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2042 barf("scavenge_large: unknown/strange object");
/* Reset the static-link fields of every object on a static list back to
 * NULL, ready for the next GC (an object is considered "on the list"
 * while its link is non-NULL — see the STATIC OBJECT LIST notes at the
 * top of this file).  The link is saved before being zeroed so the walk
 * can continue.  NOTE(review): return type and the get_itbl assignment
 * to `info' are elided in this excerpt.
 */
2048 zeroStaticObjectList(StgClosure* first_static)
2052 const StgInfoTable *info;
2054 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2056 link = STATIC_LINK(info, p);
2057 STATIC_LINK(info,p) = NULL;
2061 /* This function is only needed because we share the mutable link
2062 * field with the static link field in an IND_STATIC, so we have to
2063 * zero the mut_link field before doing a major GC, which needs the
2064 * static link field.
2066 * It doesn't do any harm to zero all the mutable link fields on the
/* Walk a mut_link list, saving each node's successor before (per the
 * comment above) its link field is zeroed.  NOTE(review): return type
 * and the loop body that does the zeroing are elided in this excerpt. */
2070 zeroMutableList(StgMutClosure *first)
2072 StgMutClosure *next, *c;
2074 for (c = first; c != END_MUT_LIST; c = next) {
2080 /* -----------------------------------------------------------------------------
2082 -------------------------------------------------------------------------- */
/* Revert every CAF on the enteredCAFs list back to the un-entered state:
 * each must currently be CAF_ENTERED (asserted), gets its info table
 * reset to CAF_UNENTERED, and has its value and link fields poisoned
 * with 0xdeadbeef so any stale use is caught.  The list is consumed as
 * it is walked (enteredCAFs advances past each node before reverting).
 */
2084 void RevertCAFs(void)
2086 while (enteredCAFs != END_CAF_LIST) {
2087 StgCAF* caf = enteredCAFs;
2089 enteredCAFs = caf->link;
2090 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
2091 SET_INFO(caf,&CAF_UNENTERED_info);
/* poison: trap accidental use of a reverted CAF's old fields */
2092 caf->value = stgCast(StgClosure*,0xdeadbeef);
2093 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* Rebuild the enteredCAFs list keeping only CAFs that survived GC:
 * an EVACUATED entry is live, so its new copy is consed back onto the
 * (freshly emptied) enteredCAFs list; dead CAFs are reverted to
 * CAF_UNENTERED and poisoned, like RevertCAFs does.  NOTE(review):
 * case labels and some statements between the visible lines are elided
 * in this excerpt.
 */
2097 void revertDeadCAFs(void)
2099 StgCAF* caf = enteredCAFs;
2100 enteredCAFs = END_CAF_LIST;
2101 while (caf != END_CAF_LIST) {
2102 StgCAF* next = caf->link;
2104 switch(GET_INFO(caf)->type) {
2107 /* This object has been evacuated, it must be live. */
2108 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
2109 new->link = enteredCAFs;
2115 SET_INFO(caf,&CAF_UNENTERED_info);
/* poison old fields, as in RevertCAFs */
2116 caf->value = stgCast(StgClosure*,0xdeadbeef);
2117 caf->link = stgCast(StgCAF*,0xdeadbeef);
2121 barf("revertDeadCAFs: enteredCAFs list corrupted");
2127 /* -----------------------------------------------------------------------------
2128 Sanity code for CAF garbage collection.
2130 With DEBUG turned on, we manage a CAF list in addition to the SRT
2131 mechanism. After GC, we run down the CAF list and blackhole any
2132 CAFs which have been garbage collected. This means we get an error
2133 whenever the program tries to enter a garbage collected CAF.
2135 Any garbage collected CAFs are taken off the CAF list at the same
2137 -------------------------------------------------------------------------- */
/* NOTE(review): the function header for this DEBUG-only routine is elided
 * in this excerpt; only fragments of its body are visible below.  From
 * the visible lines: every list entry must be an IND_STATIC; a NULL
 * static link means the CAF was not reached during GC, so it is
 * blackholed (and, per the comment above, unlinked); otherwise the walk
 * advances via the secondary static link (STATIC_LINK2).
 */
2145 const StgInfoTable *info;
2156 ASSERT(info->type == IND_STATIC);
2158 if (STATIC_LINK(info,p) == NULL) {
2159 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
2161 SET_INFO(p,&BLACKHOLE_info);
2162 p = STATIC_LINK2(info,p);
2166 pp = &STATIC_LINK2(info,p);
2173 /* fprintf(stderr, "%d CAFs live\n", i); */
2177 /* -----------------------------------------------------------------------------
2180 Whenever a thread returns to the scheduler after possibly doing
2181 some work, we have to run down the stack and black-hole all the
2182 closures referred to by update frames.
2183 -------------------------------------------------------------------------- */
/* Walks the chain of update/catch/seq frames hanging off tso->su,
 * overwriting each update frame's updatee with BLACKHOLE_info (and an
 * empty blocking queue) unless it is already a BLACKHOLE/CAF_BLACKHOLE.
 * NOTE(review): return type, case labels, and loop structure are elided
 * in this excerpt.
 */
2186 threadLazyBlackHole(StgTSO *tso)
2188 StgUpdateFrame *update_frame;
2192 stack_end = &tso->stack[tso->stack_size];
2193 update_frame = tso->su;
2196 switch (get_itbl(update_frame)->type) {
/* catch frames carry no updatee — just step past them */
2199 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
2203 bh = stgCast(StgBlackHole*,update_frame->updatee);
2205 /* if the thunk is already blackholed, it means we've also
2206 * already blackholed the rest of the thunks on this stack,
2207 * so we can stop early.
2210 /* Don't for now: when we enter a CAF, we create a black hole on
2211 * the heap and make the update frame point to it. Thus the
2212 * above optimisation doesn't apply.
2214 if (bh->header.info != &BLACKHOLE_info
2215 && bh->header.info != &CAF_BLACKHOLE_info) {
2216 SET_INFO(bh,&BLACKHOLE_info);
2217 bh->blocking_queue = END_TSO_QUEUE;
2220 update_frame = update_frame->link;
2224 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
2230 barf("threadPaused");
2235 /* -----------------------------------------------------------------------------
2238 * Code largely pinched from old RTS, then hacked to bits. We also do
2239 * lazy black holing here.
2241 * -------------------------------------------------------------------------- */
/* Squeeze out adjacent empty update frames on tso's stack, sliding the
 * intervening stack words down by the accumulated `displacement', and
 * lazily black-hole updatees along the way.  Two passes: (1) walk down
 * the stack reversing frame links, (2) walk back up squeezing and
 * sliding.  Finally tso->sp and tso->su are adjusted.  NOTE(review):
 * return type and a number of interior lines are elided in this excerpt.
 */
2244 threadSqueezeStack(StgTSO *tso)
2246 lnat displacement = 0;
2247 StgUpdateFrame *frame;
2248 StgUpdateFrame *next_frame; /* Temporally next */
2249 StgUpdateFrame *prev_frame; /* Temporally previous */
2251 rtsBool prev_was_update_frame;
2253 bottom = &(tso->stack[tso->stack_size]);
2256 /* There must be at least one frame, namely the STOP_FRAME.
2258 ASSERT((P_)frame < bottom);
2260 /* Walk down the stack, reversing the links between frames so that
2261 * we can walk back up as we squeeze from the bottom. Note that
2262 * next_frame and prev_frame refer to next and previous as they were
2263 * added to the stack, rather than the way we see them in this
2264 * walk. (It makes the next loop less confusing.)
2266 * Could stop if we find an update frame pointing to a black hole,
2267 * but see comment in threadLazyBlackHole().
2271 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
2272 prev_frame = frame->link;
2273 frame->link = next_frame;
2278 /* Now, we're at the bottom. Frame points to the lowest update
2279 * frame on the stack, and its link actually points to the frame
2280 * above. We have to walk back up the stack, squeezing out empty
2281 * update frames and turning the pointers back around on the way
2284 * The bottom-most frame (the STOP_FRAME) has not been altered, and
2285 * we never want to eliminate it anyway. Just walk one step up
2286 * before starting to squeeze. When you get to the topmost frame,
2287 * remember that there are still some words above it that might have
2294 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
2297 * Loop through all of the frames (everything except the very
2298 * bottom). Things are complicated by the fact that we have
2299 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
2300 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
2302 while (frame != NULL) {
2304 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
2305 rtsBool is_update_frame;
2307 next_frame = frame->link;
2308 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
2311 * 1. both the previous and current frame are update frames
2312 * 2. the current frame is empty
2314 if (prev_was_update_frame && is_update_frame &&
2315 (P_)prev_frame == frame_bottom + displacement) {
2317 /* Now squeeze out the current frame */
2318 StgClosure *updatee_keep = prev_frame->updatee;
2319 StgClosure *updatee_bypass = frame->updatee;
2322 fprintf(stderr, "squeezing frame at %p\n", frame);
2325 /* Deal with blocking queues. If both updatees have blocked
2326 * threads, then we should merge the queues into the update
2327 * frame that we're keeping.
2329 * Alternatively, we could just wake them up: they'll just go
2330 * straight to sleep on the proper blackhole! This is less code
2331 * and probably less bug prone, although it's probably much
2334 #if 0 /* do it properly... */
2335 if (GET_INFO(updatee_bypass) == BLACKHOLE_info
2336 || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
2338 /* Sigh. It has one. Don't lose those threads! */
2339 if (GET_INFO(updatee_keep) == BLACKHOLE_info
2340 || GET_INFO(updatee_keep) == CAF_BLACKHOLE_info
2342 /* Urgh. Two queues. Merge them. */
2343 P_ keep_tso = ((StgBlackHole *)updatee_keep)->blocking_queue;
2345 while (keep_tso->link != END_TSO_QUEUE) {
2346 keep_tso = keep_tso->link;
2348 keep_tso->link = ((StgBlackHole *)updatee_bypass)->blocking_queue;
2351 /* For simplicity, just swap the BQ for the BH */
2352 P_ temp = updatee_keep;
2354 updatee_keep = updatee_bypass;
2355 updatee_bypass = temp;
2357 /* Record the swap in the kept frame (below) */
2358 prev_frame->updatee = updatee_keep;
2363 TICK_UPD_SQUEEZED();
/* redirect the bypassed updatee at the kept one */
2364 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
2366 sp = (P_)frame - 1; /* sp = stuff to slide */
2367 displacement += sizeofW(StgUpdateFrame);
2370 /* No squeeze for this frame */
2371 sp = frame_bottom - 1; /* Keep the current frame */
2373 /* Do lazy black-holing.
2375 if (is_update_frame) {
2376 StgBlackHole *bh = (StgBlackHole *)frame->updatee;
2377 if (bh->header.info != &BLACKHOLE_info
2378 && bh->header.info != &CAF_BLACKHOLE_info
2380 SET_INFO(bh,&BLACKHOLE_info);
2381 bh->blocking_queue = END_TSO_QUEUE;
2385 /* Fix the link in the current frame (should point to the frame below) */
2386 frame->link = prev_frame;
2387 prev_was_update_frame = is_update_frame;
2390 /* Now slide all words from sp up to the next frame */
2392 if (displacement > 0) {
2393 P_ next_frame_bottom;
2395 if (next_frame != NULL)
2396 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
2398 next_frame_bottom = tso->sp - 1;
2401 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
/* copy downwards-growing stack words up by `displacement' */
2405 while (sp >= next_frame_bottom) {
2406 sp[displacement] = *sp;
2410 (P_)prev_frame = (P_)frame + displacement;
/* stack has shrunk by `displacement' words overall */
2414 tso->sp += displacement;
2415 tso->su = prev_frame;
2418 /* -----------------------------------------------------------------------------
2421 * We have to prepare for GC - this means doing lazy black holing
2422 * here. We also take the opportunity to do stack squeezing if it's
2424 * -------------------------------------------------------------------------- */
/* Called when a thread stops running: choose between squeeze+blackhole
 * and blackhole-only based on the squeezeUpdFrames RTS flag.
 * NOTE(review): an intervening line (presumably `else') between the two
 * calls is elided in this excerpt, so the calls read as alternatives.
 */
2427 threadPaused(StgTSO *tso)
2429 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
2430 threadSqueezeStack(tso); /* does black holing too */
2432 threadLazyBlackHole(tso);