1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.2 1998/12/02 13:28:23 simonm Exp $
4 * Two-space garbage collector
6 * ---------------------------------------------------------------------------*/
12 #include "StoragePriv.h"
15 #include "SchedAPI.h" /* for RevertCAFs prototype */
18 #include "BlockAlloc.h"
20 #include "DebugProf.h"
/* Allocation state for the to-space that the collector copies live
 * objects into.  toHp/toHpLim bound the current allocation block;
 * extra blocks are chained on as needed (see copy()).
 */
26 static P_ toHp; /* to-space heap pointer */
27 static P_ toHpLim; /* end of current to-space block */
28 static bdescr *toHp_bd; /* descriptor of current to-space block */
29 static nat blocks = 0; /* number of to-space blocks allocated */
30 static bdescr *old_to_space = NULL; /* to-space from the last GC */
31 static nat old_to_space_blocks = 0; /* size of previous to-space */
33 /* STATIC OBJECT LIST.
35 * We maintain a linked list of static objects that are still live.
36 * The requirements for this list are:
38 * - we need to scan the list while adding to it, in order to
39 * scavenge all the static objects (in the same way that
40 * breadth-first scavenging works for dynamic objects).
42 * - we need to be able to tell whether an object is already on
43 * the list, to break loops.
45 * Each static object has a "static link field", which we use for
46 * linking objects on to the list. We use a stack-type list, consing
47 * objects on the front as they are added (this means that the
48 * scavenge phase is depth-first, not breadth-first, but that
51 * A separate list is kept for objects that have been scavenged
52 * already - this is so that we can zero all the marks afterwards.
54 * An object is on the list if its static link field is non-zero; this
55 * means that we have to mark the end of the list with '1', not NULL.
/* Sentinel for the static-object lists below: the list is threaded
 * through each object's static link field, and a non-zero field means
 * "already on a list", so the end marker must be 1, not NULL.
 */
57 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
/* Live static closures found so far (still to be scavenged), and those
 * already scavenged (kept so the link fields can be zeroed afterwards).
 */
58 static StgClosure* static_objects;
59 static StgClosure* scavenged_static_objects;
/* Weak-pointer bookkeeping for the current GC pass. */
63 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
64 static rtsBool weak_done; /* all done for this pass */
/* Large objects are not copied; they migrate between these chains. */
68 static bdescr *new_large_objects; /* large objects evacuated so far */
69 static bdescr *scavenged_large_objects; /* large objects scavenged */
71 /* -----------------------------------------------------------------------------
72 Static function declarations
73 -------------------------------------------------------------------------- */
75 static StgClosure *evacuate(StgClosure *q);
76 static void zeroStaticObjectList(StgClosure* first_static);
77 static void scavenge_stack(StgPtr p, StgPtr stack_end);
78 static void scavenge_static(void);
79 static void scavenge_large(void);
80 static StgPtr scavenge(StgPtr to_scan);
81 static rtsBool traverse_weak_ptr_list(void);
82 static void revertDeadCAFs(void);
85 static void gcCAFs(void);
88 /* -----------------------------------------------------------------------------
91 This function performs a full copying garbage collection.
92 -------------------------------------------------------------------------- */
/* GarbageCollect: perform a full two-space copying collection.
 *
 * get_roots - callback supplied by the client; expected to call
 *             MarkRoot() on every root it knows about.
 *
 * Phases visible below: pause the current thread, estimate allocation,
 * reset the static/large-object lists, grab a fresh to-space, evacuate
 * the roots, scavenge to a fixed point (statics, heap, large objects,
 * weak pointers), then free the old to-space and small-object lists,
 * resize and reinitialise the nursery, run pending finalisers, and
 * report statistics.
 *
 * NOTE(review): this listing is elided (left-margin line numbers are
 * not contiguous), so some statements and braces are missing from this
 * view; comments here describe only what is visible.
 */
94 void GarbageCollect(void (*get_roots)(void))
96 bdescr *bd, *scan_bd, *to_space;
99 nat old_nursery_blocks = nursery_blocks; /* for stats */
100 nat old_live_blocks = old_to_space_blocks; /* ditto */
102 CostCentreStack *prev_CCS;
105 /* tell the stats department that we've started a GC */
108 /* attribute any costs to CCS_GC */
114 /* We might have been called from Haskell land by _ccall_GC, in
115 * which case we need to call threadPaused() because the scheduler
116 * won't have done it.
119 threadPaused(CurrentTSO);
121 /* Approximate how much we allocated: number of blocks in the
122 * nursery + blocks allocated via allocate() - unused nursery blocks.
123 * This leaves a little slop at the end of each block, and doesn't
124 * take into account large objects (ToDo).
126 allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
127 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
128 allocated -= BLOCK_SIZE_W;
131 /* check stack sanity *before* GC (ToDo: check all threads) */
132 /*IF_DEBUG(sanity, checkTSO(MainTSO,0)); */
133 IF_DEBUG(sanity, checkFreeListSanity());
/* Reset the per-GC lists (see the globals near the top of the file). */
135 static_objects = END_OF_STATIC_LIST;
136 scavenged_static_objects = END_OF_STATIC_LIST;
138 new_large_objects = NULL;
139 scavenged_large_objects = NULL;
141 /* Get a free block for to-space. Extra blocks will be chained on
145 bd->step = 1; /* step 1 identifies to-space */
147 toHpLim = toHp + BLOCK_SIZE_W;
155 /* follow all the roots that the application knows about */
158 /* And don't forget to mark the TSO if we got here direct from
161 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
164 /* Mark the weak pointer list, and prepare to detect dead weak
168 old_weak_ptr_list = weak_ptr_list;
169 weak_ptr_list = NULL;
170 weak_done = rtsFalse;
174 /* ToDo: To fix the caf leak, we need to make the commented out
175 * parts of this code do something sensible - as described in
178 extern void markHugsObjects(void);
180 /* ToDo: This (undefined) function should contain the scavenge
181 * loop immediately below this block of code - but I'm not sure
182 * enough of the details to do this myself.
184 scavengeEverything();
185 /* revert dead CAFs and update enteredCAFs list */
190 /* This will keep the CAFs and the attached BCOs alive
191 * but the values will have been reverted
193 scavengeEverything();
198 /* Then scavenge all the objects we picked up on the first pass.
199 * We may require multiple passes to find all the static objects,
200 * large objects and normal objects.
204 if (static_objects != END_OF_STATIC_LIST) {
207 if (toHp_bd != scan_bd || scan < toHp) {
208 scan = scavenge(scan);
209 scan_bd = Bdescr(scan);
212 if (new_large_objects != NULL) {
216 /* must be last... */
217 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
222 /* tidy up the end of the to-space chain */
223 toHp_bd->free = toHp;
224 toHp_bd->link = NULL;
226 /* revert dead CAFs and update enteredCAFs list */
229 /* mark the garbage collected CAFs as dead */
/* Unthread the scavenged-statics list so the next GC starts clean. */
234 zeroStaticObjectList(scavenged_static_objects);
236 /* approximate amount of live data (doesn't take into account slop
237 * at end of each block). ToDo: this more accurately.
239 live = blocks * BLOCK_SIZE_W + ((lnat)toHp_bd->free -
240 (lnat)toHp_bd->start) / sizeof(W_);
242 /* Free the to-space from the last GC, as it has now been collected.
243 * we may be able to re-use these blocks in creating a new nursery,
244 * below. If not, the blocks will probably be re-used for to-space
247 if (old_to_space != NULL) {
248 freeChain(old_to_space);
250 old_to_space = to_space;
251 old_to_space_blocks = blocks;
253 /* Free the small objects allocated via allocate(), since this will
254 * all have been copied into to-space now.
256 if (small_alloc_list != NULL) {
257 freeChain(small_alloc_list);
259 small_alloc_list = NULL;
261 alloc_blocks_lim = stg_max(blocks,RtsFlags.GcFlags.minAllocAreaSize);
263 /* LARGE OBJECTS. The current live large objects are chained on
264 * scavenged_large_objects, having been moved during garbage
265 * collection from large_alloc_list. Any objects left on
266 * large_alloc list are therefore dead, so we free them here.
270 bd = large_alloc_list;
276 large_alloc_list = scavenged_large_objects;
280 /* check sanity after GC */
281 IF_DEBUG(sanity, checkHeap(to_space,1));
282 /*IF_DEBUG(sanity, checkTSO(MainTSO,1)); */
283 IF_DEBUG(sanity, checkFreeListSanity());
286 /* symbol-table based profiling */
287 heapCensus(to_space);
290 /* set up a new nursery. Allocate a nursery size based on a
291 * function of the amount of live data (currently a factor of 2,
292 * should be configurable (ToDo)). Use the blocks from the old
293 * nursery if possible, freeing up any left over blocks.
295 * If we get near the maximum heap size, then adjust our nursery
296 * size accordingly. If the nursery is the same size as the live
297 * data (L), then we need 3L bytes. We can reduce the size of the
298 * nursery to bring the required memory down near 2L bytes.
300 * A normal 2-space collector would need 4L bytes to give the same
301 * performance we get from 3L bytes, reducing to the same
302 * performance at 2L bytes.
304 if ( blocks * 4 > RtsFlags.GcFlags.maxHeapSize ) {
305 int adjusted_blocks; /* signed on purpose */
308 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
309 IF_DEBUG(gc, fprintf(stderr, "Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
310 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
311 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
314 blocks = adjusted_blocks;
318 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
319 blocks = RtsFlags.GcFlags.minAllocAreaSize;
/* Grow or shrink the nursery to the newly chosen block count. */
323 if (nursery_blocks < blocks) {
324 IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
326 nursery = allocNursery(nursery,blocks-nursery_blocks);
328 bdescr *next_bd = nursery;
330 IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
332 for (bd = nursery; nursery_blocks > blocks; nursery_blocks--) {
340 current_nursery = nursery;
341 nursery_blocks = blocks;
343 /* set the step number for each block in the nursery to zero */
344 for (bd = nursery; bd != NULL; bd = bd->link) {
346 bd->free = bd->start;
348 for (bd = to_space; bd != NULL; bd = bd->link) {
351 for (bd = large_alloc_list; bd != NULL; bd = bd->link) {
356 /* check that we really have the right number of blocks in the
357 * nursery, or things could really get screwed up.
361 for (bd = nursery; bd != NULL; bd = bd->link) {
362 ASSERT(bd->free == bd->start);
363 ASSERT(bd->step == 0);
366 ASSERT(i == nursery_blocks);
370 /* start any pending finalisers */
371 scheduleFinalisers(old_weak_ptr_list);
373 /* restore enclosing cost centre */
378 /* ok, GC over: tell the stats department what happened. */
379 stat_endGC(allocated,
380 (old_nursery_blocks + old_live_blocks) * BLOCK_SIZE_W,
384 /* -----------------------------------------------------------------------------
387 traverse_weak_ptr_list is called possibly many times during garbage
388 collection. It returns a flag indicating whether it did any work
389 (i.e. called evacuate on any live pointers).
391 Invariant: traverse_weak_ptr_list is called when the heap is in an
392 idempotent state. That means that there are no pending
393 evacuate/scavenge operations. This invariant helps the weak
394 pointer code decide which weak pointers are dead - if there are no
395 new live weak pointers, then all the currently unreachable ones are
397 -------------------------------------------------------------------------- */
/* traverse_weak_ptr_list: decide which weak pointers are still live.
 *
 * Walks old_weak_ptr_list; a weak pointer whose key has been evacuated
 * is kept alive (its value and finaliser are evacuated, and it moves to
 * the new weak_ptr_list).  Returns rtsTrue if anything was evacuated,
 * so the caller re-runs the scavenge loop; called repeatedly until no
 * more work is done, at which point the remaining entries are dead and
 * old_weak_ptr_list becomes the pending-finaliser list.
 */
400 traverse_weak_ptr_list(void)
402 StgWeak *w, **last_w, *next_w;
404 const StgInfoTable *info;
405 rtsBool flag = rtsFalse;
407 if (weak_done) { return rtsFalse; }
409 last_w = &old_weak_ptr_list;
410 for (w = old_weak_ptr_list; w; w = next_w) {
413 info = get_itbl(target);
414 switch (info->type) {
420 case IND_OLDGEN_PERM:
421 /* follow indirections */
422 target = ((StgInd *)target)->indirectee;
426 /* If key is alive, evacuate value and finaliser and
427 * place weak ptr on new weak ptr list.
429 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p\n", w));
430 w->key = ((StgEvacuated *)target)->evacuee;
431 w->value = evacuate(w->value);
432 w->finaliser = evacuate(w->finaliser);
434 /* remove this weak ptr from the old_weak_ptr list */
437 /* and put it on the new weak ptr list */
439 w->link = weak_ptr_list;
444 default: /* key is dead */
451 /* If we didn't make any changes, then we can go round and kill all
452 * the dead weak pointers. The old_weak_ptr list is used as a list
453 * of pending finalisers later on.
455 if (flag == rtsFalse) {
456 for (w = old_weak_ptr_list; w; w = w->link) {
457 w->value = evacuate(w->value);
458 w->finaliser = evacuate(w->finaliser);
/* MarkRoot: evacuate a single root and return its new address.
 * Callers must update their root pointer with the returned value,
 * since the closure has been copied into to-space.
 */
466 StgClosure *MarkRoot(StgClosure *root)
468 root = evacuate(root);
/* copy: allocate `size` words in to-space and copy closure `src` there,
 * returning the new address.  Opens a fresh to-space block (step 1)
 * when the current one is exhausted.
 */
472 static __inline__ StgClosure *copy(StgClosure *src, W_ size)
476 if (toHp + size >= toHpLim) {
477 bdescr *bd = allocBlock();
478 toHp_bd->free = toHp;
480 bd->step = 1; /* step 1 identifies to-space */
482 toHpLim = toHp + BLOCK_SIZE_W;
/* word-by-word copy of the closure into the freshly allocated space */
489 for(to = dest, from = (P_)src; size>0; --size) {
492 return (StgClosure *)dest;
/* upd_evacuee: overwrite the from-space closure `p` with an EVACUATED
 * forwarding object pointing at its to-space copy `dest`, so later
 * references find the forwarding address.
 */
495 static __inline__ void upd_evacuee(StgClosure *p, StgClosure *dest)
497 StgEvacuated *q = (StgEvacuated *)p;
499 SET_INFO(q,&EVACUATED_info);
503 /* -----------------------------------------------------------------------------
504 Evacuate a large object
506 This just consists of removing the object from the (doubly-linked)
507 large_alloc_list, and linking it on to the (singly-linked)
508 new_large_objects list, from where it will be scavenged later.
509 -------------------------------------------------------------------------- */
/* evacuate_large: move a large object's block descriptor from the
 * doubly-linked large_alloc_list onto the singly-linked
 * new_large_objects list, from where it will be scavenged later.
 * Large objects are never copied - only relinked.
 */
511 static inline void evacuate_large(StgPtr p)
513 bdescr *bd = Bdescr(p);
515 /* should point to the beginning of the block */
516 ASSERT(((W_)p & BLOCK_MASK) == 0);
518 /* already evacuated? */
523 /* remove from large_alloc_list */
525 bd->back->link = bd->link;
526 } else { /* first object in the list */
527 large_alloc_list = bd->link;
530 bd->link->back = bd->back;
533 /* link it on to the evacuated large object list */
534 bd->link = new_large_objects;
535 new_large_objects = bd;
539 /* -----------------------------------------------------------------------------
542 This is called (eventually) for every live object in the system.
543 -------------------------------------------------------------------------- */
/* evacuate: copy a live closure into to-space and return its new
 * address.  Called (eventually) for every live object.  Dispatches on
 * closure type: ordinary closures are copied; indirections are
 * followed without copying; THUNK_SELECTORs are short-circuited by
 * performing the selection when the selectee is already evaluated;
 * static closures are threaded onto static_objects instead of copied;
 * large objects and large TSOs are relinked via evacuate_large();
 * already-EVACUATED closures return their forwarding address.
 */
545 static StgClosure *evacuate(StgClosure *q)
548 const StgInfoTable *info;
551 /* make sure the info pointer is into text space */
552 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
553 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
556 switch (info -> type) {
559 to = copy(q,bco_sizeW(stgCast(StgBCO*,q)));
567 case IND_OLDGEN_PERM:
574 to = copy(q,sizeW_fromITBL(info));
580 to = copy(q,BLACKHOLE_sizeW());
586 const StgInfoTable* selectee_info;
587 StgClosure* selectee = stgCast(StgSelector*,q)->selectee;
590 selectee_info = get_itbl(selectee);
591 switch (selectee_info->type) {
595 StgNat32 offset = info->layout.selector_offset;
597 /* check that the size is in range */
599 (StgNat32)(selectee_info->layout.payload.ptrs +
600 selectee_info->layout.payload.nptrs));
602 /* perform the selection! */
603 q = selectee->payload[offset];
605 /* if we're already in to-space, there's no need to continue
606 * with the evacuation, just update the source address with
607 * a pointer to the (evacuated) constructor field.
609 if (IS_USER_PTR(q) && Bdescr((P_)q)->step == 1) {
613 /* otherwise, carry on and evacuate this constructor field,
614 * (but not the constructor itself)
623 case IND_OLDGEN_PERM:
/* follow chains of indirections through to the real selectee */
624 selectee = stgCast(StgInd *,selectee)->indirectee;
628 selectee = stgCast(StgCAF *,selectee)->value;
632 selectee = stgCast(StgEvacuated*,selectee)->evacuee;
638 /* aargh - do recursively???? */
642 /* not evaluated yet */
646 barf("evacuate: THUNK_SELECTOR: strange selectee");
649 to = copy(q,THUNK_SELECTOR_sizeW());
655 /* follow chains of indirections, don't evacuate them */
656 q = stgCast(StgInd*,q)->indirectee;
663 /* don't want to evacuate these, but we do want to follow pointers
664 * from SRTs - see scavenge_static.
667 /* put the object on the static list, if necessary.
669 if (STATIC_LINK(info,(StgClosure *)q) == NULL) {
670 STATIC_LINK(info,(StgClosure *)q) = static_objects;
671 static_objects = (StgClosure *)q;
676 case CONSTR_CHARLIKE:
677 case CONSTR_NOCAF_STATIC:
678 /* no need to put these on the static linked list, they don't need
693 /* shouldn't see these */
694 barf("evacuate: stack frame\n");
698 /* these are special - the payload is a copy of a chunk of stack,
700 to = copy(q,pap_sizeW(stgCast(StgPAP*,q)));
705 /* Already evacuated, just return the forwarding address */
706 return stgCast(StgEvacuated*,q)->evacuee;
711 case MUT_ARR_PTRS_FROZEN:
714 nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
716 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
717 evacuate_large((P_)q);
720 /* just copy the block */
729 StgTSO *tso = stgCast(StgTSO *,q);
730 nat size = tso_sizeW(tso);
733 /* Large TSOs don't get moved, so no relocation is required.
735 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
736 evacuate_large((P_)q);
739 /* To evacuate a small TSO, we need to relocate the update frame
743 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso));
745 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
747 /* relocate the stack pointers... */
748 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
749 new_tso->sp = (StgPtr)new_tso->sp + diff;
750 new_tso->splim = (StgPtr)new_tso->splim + diff;
752 relocate_TSO(tso, new_tso);
753 upd_evacuee(q,(StgClosure *)new_tso);
754 return (StgClosure *)new_tso;
760 fprintf(stderr,"evacuate: unimplemented/strange closure type\n");
764 barf("evacuate: strange closure type");
770 /* -----------------------------------------------------------------------------
771 relocate_TSO is called just after a TSO has been copied from src to
772 dest. It adjusts the update frame list for the new location.
773 -------------------------------------------------------------------------- */
/* relocate_TSO: called just after a TSO has been copied from src to
 * dest.  Walks the update-frame chain in the copied stack, adjusting
 * each frame's link pointer by the (word) displacement between the old
 * and new stack locations.  Handles UPDATE, CATCH and SEQ frames.
 */
776 relocate_TSO(StgTSO *src, StgTSO *dest)
783 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
787 while ((P_)su < dest->stack + dest->stack_size) {
788 switch (get_itbl(su)->type) {
790 /* GCC actually manages to common up these three cases! */
793 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
798 cf = (StgCatchFrame *)su;
799 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
804 sf = (StgSeqFrame *)su;
805 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
814 barf("relocate_TSO");
/* evacuate_srt: evacuate every closure referenced from an info table's
 * Static Reference Table (SRT), keeping CAFs reachable from code alive.
 */
823 evacuate_srt(const StgInfoTable *info)
825 StgClosure **srt, **srt_end;
827 /* evacuate the SRT. If srt_len is zero, then there isn't an
828 * srt field in the info table. That's ok, because we'll
829 * never dereference it.
831 srt = stgCast(StgClosure **,info->srt);
832 srt_end = srt + info->srt_len;
833 for (; srt < srt_end; srt++) {
/* scavenge: standard breadth-first (Cheney-style) scan of to-space.
 * Walks from to_scan up to the allocation pointer, evacuating every
 * pointer found in each copied closure, and returns the new scan
 * pointer.  Evacuation may extend to-space, so the loop chases the
 * moving toHp frontier across block boundaries.
 */
839 scavenge(StgPtr to_scan)
842 const StgInfoTable *info;
848 /* scavenge phase - standard breadth-first scavenging of the
852 while (bd != toHp_bd || p < toHp) {
854 /* If we're at the end of this block, move on to the next block */
855 if (bd != toHp_bd && p == bd->free) {
861 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
862 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
864 info = get_itbl((StgClosure *)p);
865 switch (info -> type) {
869 StgBCO* bco = stgCast(StgBCO*,p);
871 for (i = 0; i < bco->n_ptrs; i++) {
872 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
889 case IND_OLDGEN_PERM:
/* generic layout: evacuate the pointer fields, skip the non-pointers */
895 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
896 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
897 (StgClosure *)*p = evacuate((StgClosure *)*p);
899 p += info->layout.payload.nptrs;
906 StgBlackHole *bh = (StgBlackHole *)p;
907 (StgClosure *)bh->blocking_queue =
908 evacuate((StgClosure *)bh->blocking_queue);
909 p += BLACKHOLE_sizeW();
915 StgSelector *s = (StgSelector *)p;
916 s->selectee = evacuate(s->selectee);
917 p += THUNK_SELECTOR_sizeW();
923 barf("scavenge:IND???\n");
926 case CONSTR_CHARLIKE:
928 case CONSTR_NOCAF_STATIC:
932 /* Shouldn't see a static object here. */
933 barf("scavenge: STATIC object\n");
945 /* Shouldn't see stack frames here. */
946 barf("scavenge: stack frame\n");
948 case AP_UPD: /* same as PAPs */
950 /* Treat a PAP just like a section of stack, not forgetting to
951 * evacuate the function pointer too...
954 StgPAP* pap = stgCast(StgPAP*,p);
956 pap->fun = evacuate(pap->fun);
957 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
964 /* nothing to follow */
965 p += arr_words_sizeW(stgCast(StgArrWords*,p));
970 case MUT_ARR_PTRS_FROZEN:
971 /* follow everything */
975 next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p));
976 for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) {
977 (StgClosure *)*p = evacuate((StgClosure *)*p);
987 /* chase the link field for any TSOs on the same queue */
988 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
989 /* scavenge this thread's stack */
990 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
998 barf("scavenge: unimplemented/strange closure type\n");
1007 /* scavenge_static is the scavenge code for a static closure.
 * Drains the static_objects list: each object is moved onto the
 * scavenged_static_objects list (so its link field can be zeroed after
 * GC) and then its pointer fields are evacuated.  Evacuation may push
 * more objects onto static_objects, so the loop re-reads the global
 * head each iteration.
 */
1011 scavenge_static(void)
1013 StgClosure* p = static_objects;
1014 const StgInfoTable *info;
1016 /* keep going until we've scavenged all the objects on the linked
1018 while (p != END_OF_STATIC_LIST) {
1020 /* make sure the info pointer is into text space */
1021 ASSERT(p && LOOKS_LIKE_GHC_INFO(GET_INFO(p)));
1022 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
1023 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
1027 /* Take this object *off* the static_objects list,
1028 * and put it on the scavenged_static_objects list.
1030 static_objects = STATIC_LINK(info,p);
1031 STATIC_LINK(info,p) = scavenged_static_objects;
1032 scavenged_static_objects = p;
1034 switch (info -> type) {
1038 StgInd *ind = (StgInd *)p;
1039 ind->indirectee = evacuate(ind->indirectee);
1052 next = (P_)p->payload + info->layout.payload.ptrs;
1053 /* evacuate the pointers */
1054 for (q = (P_)p->payload; q < next; q++) {
1055 (StgClosure *)*q = evacuate((StgClosure *)*q);
1061 barf("scavenge_static");
1064 /* get the next static object from the list. Remember, there might
1065 * be more stuff on this list now that we've done some evacuating!
1066 * (static_objects is a global)
1072 /* -----------------------------------------------------------------------------
1073 scavenge_stack walks over a section of stack and evacuates all the
1074 objects pointed to by it. We can use the same code for walking
1075 PAPs, since these are just sections of copied stack.
1076 -------------------------------------------------------------------------- */
/* scavenge_stack: walk a section of thread stack [p, stack_end) and
 * evacuate every closure it points to.  Also used for PAP payloads,
 * which are copied chunks of stack.  The stack alternates between
 * pending-argument sections (tagged words and closure pointers) and
 * activation records, whose live pointers are described by a small or
 * large bitmap in their info table (bit clear = live pointer).
 */
1079 scavenge_stack(StgPtr p, StgPtr stack_end)
1082 const StgInfoTable* info;
1086 * Each time around this loop, we are looking at a chunk of stack
1087 * that starts with either a pending argument section or an
1088 * activation record.
1091 while (p < stack_end) {
1092 q = *stgCast(StgPtr*,p);
1094 /* If we've got a tag, skip over that many words on the stack */
1095 if (IS_ARG_TAG(stgCast(StgWord,q))) {
1100 /* Is q a pointer to a closure?
1102 if (! LOOKS_LIKE_GHC_INFO(q)) {
1105 if (LOOKS_LIKE_STATIC(q)) { /* Is it a static closure? */
1106 ASSERT(closure_STATIC(stgCast(StgClosure*,q)));
1108 /* otherwise, must be a pointer into the allocation space.
1112 (StgClosure *)*p = evacuate((StgClosure *)q);
1118 * Otherwise, q must be the info pointer of an activation
1119 * record. All activation records have 'bitmap' style layout
1122 info = get_itbl(stgCast(StgClosure*,p));
1124 switch (info->type) {
1126 /* Dynamic bitmap: the mask is stored on the stack */
1128 bitmap = stgCast(StgRetDyn*,p)->liveness;
1129 p = &payloadWord(stgCast(StgRetDyn*,p),0);
1132 /* probably a slow-entry point return address: */
1138 /* Specialised code for update frames, since they're so common.
1139 * We *know* the updatee points to a BLACKHOLE or CAF_BLACKHOLE,
1140 * so just inline the code to evacuate it here.
1144 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1146 StgClosureType type = get_itbl(frame->updatee)->type;
1148 if (type == EVACUATED) {
1149 frame->updatee = evacuate(frame->updatee);
1150 p += sizeofW(StgUpdateFrame);
1153 ASSERT(type == BLACKHOLE || type == CAF_BLACKHOLE);
1154 to = copy(frame->updatee, BLACKHOLE_sizeW());
1155 upd_evacuee(frame->updatee,to);
1156 frame->updatee = to;
1157 p += sizeofW(StgUpdateFrame);
1162 /* small bitmap (< 32 entries) */
1169 bitmap = info->layout.bitmap;
1172 while (bitmap != 0) {
1173 if ((bitmap & 1) == 0) {
1174 (StgClosure *)*p = evacuate((StgClosure *)*p);
1177 bitmap = bitmap >> 1;
1184 /* large bitmap (> 32 entries) */
1188 StgLargeBitmap *large_bitmap;
1191 large_bitmap = info->layout.large_bitmap;
1194 for (i=0; i<large_bitmap->size; i++) {
1195 bitmap = large_bitmap->bitmap[i];
1196 while (bitmap != 0) {
1197 if ((bitmap & 1) == 0) {
1198 (StgClosure *)*p = evacuate((StgClosure *)*p);
1201 bitmap = bitmap >> 1;
1205 /* and don't forget to follow the SRT */
1210 barf("scavenge_stack: weird activation record found on stack.\n");
1215 /*-----------------------------------------------------------------------------
1216 scavenge the large object list.
1217 --------------------------------------------------------------------------- */
/* scavenge_large: scavenge every object on the new_large_objects list.
 * Each block descriptor is first moved onto scavenged_large_objects
 * (new_large_objects behaves as a stack - evacuation may push more
 * entries while we scan), then the object's pointers are evacuated.
 */
1220 scavenge_large(void)
1224 const StgInfoTable* info;
1226 bd = new_large_objects;
1228 for (; bd != NULL; bd = new_large_objects) {
1230 /* take this object *off* the large objects list and put it on
1231 * the scavenged large objects list. This is so that we can
1232 * treat new_large_objects as a stack and push new objects on
1233 * the front when evacuating.
1235 new_large_objects = bd->link;
1236 /* scavenged_large_objects is doubly linked */
1237 bd->link = scavenged_large_objects;
1239 if (scavenged_large_objects) {
1240 scavenged_large_objects->back = bd;
1242 scavenged_large_objects = bd;
1245 info = get_itbl(stgCast(StgClosure*,p));
1247 switch (info->type) {
1249 /* only certain objects can be "large"... */
1253 /* nothing to follow */
1258 case MUT_ARR_PTRS_FROZEN:
1259 /* follow everything */
1263 next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p));
1264 for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) {
1265 (StgClosure *)*p = evacuate((StgClosure *)*p);
1272 StgBCO* bco = stgCast(StgBCO*,p);
1274 for (i = 0; i < bco->n_ptrs; i++) {
1275 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1285 /* chase the link field for any TSOs on the same queue */
1286 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1287 /* scavenge this thread's stack */
1288 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1293 barf("scavenge_large: unknown/strange object");
/* zeroStaticObjectList: unthread a static-object list by resetting
 * every static link field to NULL, so the next GC sees all static
 * objects as "not yet on a list".
 */
1298 zeroStaticObjectList(StgClosure* first_static)
1302 const StgInfoTable *info;
1304 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
1306 link = STATIC_LINK(info, p);
1307 STATIC_LINK(info,p) = NULL;
1311 /* -----------------------------------------------------------------------------
1314 -------------------------------------------------------------------------- */
/* RevertCAFs: walk the enteredCAFs list, flipping every entered CAF
 * back to CAF_UNENTERED and poisoning its value/link fields with
 * 0xdeadbeef so stale uses are caught early.
 */
1316 void RevertCAFs(void)
1318 while (enteredCAFs != END_CAF_LIST) {
1319 StgCAF* caf = enteredCAFs;
1320 const StgInfoTable *info = get_itbl(caf);
1322 enteredCAFs = caf->link;
1323 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
1324 SET_INFO(caf,&CAF_UNENTERED_info);
1325 caf->value = stgCast(StgClosure*,0xdeadbeef);
1326 caf->link = stgCast(StgCAF*,0xdeadbeef);
/* revertDeadCAFs: rebuild the enteredCAFs list after a GC.  CAFs that
 * were evacuated are live: their to-space copies are relinked onto
 * enteredCAFs.  Dead ones are reset to CAF_UNENTERED and poisoned with
 * 0xdeadbeef.  Any other info type means the list is corrupt.
 */
1330 void revertDeadCAFs(void)
1332 StgCAF* caf = enteredCAFs;
1333 enteredCAFs = END_CAF_LIST;
1334 while (caf != END_CAF_LIST) {
1335 StgCAF* next = caf->link;
1337 switch(GET_INFO(caf)->type) {
1340 /* This object has been evacuated, it must be live. */
1341 StgCAF* new = stgCast(StgCAF*,stgCast(StgEvacuated*,caf)->evacuee);
1342 new->link = enteredCAFs;
1348 SET_INFO(caf,&CAF_UNENTERED_info);
1349 caf->value = stgCast(StgClosure*,0xdeadbeef);
1350 caf->link = stgCast(StgCAF*,0xdeadbeef);
1354 barf("revertDeadCAFs: enteredCAFs list corrupted");
1360 /* -----------------------------------------------------------------------------
1361 Sanity code for CAF garbage collection.
1363 With DEBUG turned on, we manage a CAF list in addition to the SRT
1364 mechanism. After GC, we run down the CAF list and blackhole any
1365 CAFs which have been garbage collected. This means we get an error
1366 whenever the program tries to enter a garbage collected CAF.
1368 Any garbage collected CAFs are taken off the CAF list at the same
1370 -------------------------------------------------------------------------- */
/* NOTE(review): body fragment of the DEBUG-only gcCAFs() (declared
 * above); its signature line is missing from this elided listing.
 * Visible logic: walk the debug CAF list; a CAF whose static link is
 * NULL was not marked during GC, so it is blackholed (entering it
 * later raises an error) and unlinked; live CAFs are kept via the
 * STATIC_LINK2 back-pointer.
 */
1378 const StgInfoTable *info;
1389 ASSERT(info->type == IND_STATIC);
1391 if (STATIC_LINK(info,p) == NULL) {
1392 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
1394 SET_INFO(p,&BLACKHOLE_info);
1395 p = STATIC_LINK2(info,p);
1399 pp = &STATIC_LINK2(info,p);
1406 /* fprintf(stderr, "%d CAFs live\n", i); */
1410 /* -----------------------------------------------------------------------------
1413 Whenever a thread returns to the scheduler after possibly doing
1414 some work, we have to run down the stack and black-hole all the
1415 closures referred to by update frames.
1416 -------------------------------------------------------------------------- */
/* threadLazyBlackHole: walk a paused thread's update-frame chain and
 * black-hole every updatee that is not already a (CAF_)BLACKHOLE,
 * initialising its blocking queue to the empty TSO queue.  Catch and
 * seq frames are simply stepped over via their link fields.
 */
1419 threadLazyBlackHole(StgTSO *tso)
1421 StgUpdateFrame *update_frame;
1425 stack_end = &tso->stack[tso->stack_size];
1426 update_frame = tso->su;
1429 switch (get_itbl(update_frame)->type) {
1432 update_frame = stgCast(StgCatchFrame*,update_frame)->link;
1436 bh = stgCast(StgBlackHole*,update_frame->updatee);
1438 /* if the thunk is already blackholed, it means we've also
1439 * already blackholed the rest of the thunks on this stack,
1440 * so we can stop early.
1443 /* Don't for now: when we enter a CAF, we create a black hole on
1444 * the heap and make the update frame point to it. Thus the
1445 * above optimisation doesn't apply.
1447 if (bh->header.info != &BLACKHOLE_info
1448 && bh->header.info != &CAF_BLACKHOLE_info) {
1449 SET_INFO(bh,&BLACKHOLE_info);
1450 bh->blocking_queue = stgCast(StgTSO*,&END_TSO_QUEUE_closure);
1453 update_frame = update_frame->link;
1457 update_frame = stgCast(StgSeqFrame*,update_frame)->link;
1463 barf("threadPaused");
1468 /* -----------------------------------------------------------------------------
1471 * Code largely pinched from old RTS, then hacked to bits. We also do
1472 * lazy black holing here.
1474 * -------------------------------------------------------------------------- */
/* threadSqueezeStack: eliminate adjacent "empty" update frames on a
 * thread's stack (two consecutive update frames with nothing between
 * them), redirecting the bypassed updatee to the kept one with an
 * indirection, sliding the intervening stack words up by the
 * accumulated displacement, and lazily black-holing updatees as it
 * goes.  Works in two passes: first reverse the frame links walking
 * down the stack, then walk back up squeezing and restoring the links.
 */
1477 threadSqueezeStack(StgTSO *tso)
1479 lnat displacement = 0;
1480 StgUpdateFrame *frame;
1481 StgUpdateFrame *next_frame; /* Temporally next */
1482 StgUpdateFrame *prev_frame; /* Temporally previous */
1484 rtsBool prev_was_update_frame;
1486 bottom = &(tso->stack[tso->stack_size]);
1489 /* There must be at least one frame, namely the STOP_FRAME.
1491 ASSERT((P_)frame < bottom);
1493 /* Walk down the stack, reversing the links between frames so that
1494 * we can walk back up as we squeeze from the bottom. Note that
1495 * next_frame and prev_frame refer to next and previous as they were
1496 * added to the stack, rather than the way we see them in this
1497 * walk. (It makes the next loop less confusing.)
1499 * Could stop if we find an update frame pointing to a black hole,
1500 * but see comment in threadLazyBlackHole().
1504 while ((P_)frame < bottom - 1) { /* bottom - 1 is the STOP_FRAME */
1505 prev_frame = frame->link;
1506 frame->link = next_frame;
1511 /* Now, we're at the bottom. Frame points to the lowest update
1512 * frame on the stack, and its link actually points to the frame
1513 * above. We have to walk back up the stack, squeezing out empty
1514 * update frames and turning the pointers back around on the way
1517 * The bottom-most frame (the STOP_FRAME) has not been altered, and
1518 * we never want to eliminate it anyway. Just walk one step up
1519 * before starting to squeeze. When you get to the topmost frame,
1520 * remember that there are still some words above it that might have
1527 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
1530 * Loop through all of the frames (everything except the very
1531 * bottom). Things are complicated by the fact that we have
1532 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
1533 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
1535 while (frame != NULL) {
1537 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
1538 rtsBool is_update_frame;
1540 next_frame = frame->link;
1541 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
/* Squeeze only when:
1544 * 1. both the previous and current frame are update frames
1545 * 2. the current frame is empty
1547 if (prev_was_update_frame && is_update_frame &&
1548 (P_)prev_frame == frame_bottom + displacement) {
1550 /* Now squeeze out the current frame */
1551 StgClosure *updatee_keep = prev_frame->updatee;
1552 StgClosure *updatee_bypass = frame->updatee;
1555 fprintf(stderr, "squeezing frame at %p\n", frame);
1558 /* Deal with blocking queues. If both updatees have blocked
1559 * threads, then we should merge the queues into the update
1560 * frame that we're keeping.
1562 * Alternatively, we could just wake them up: they'll just go
1563 * straight to sleep on the proper blackhole! This is less code
1564 * and probably less bug prone, although it's probably much
1567 #if 0 /* do it properly... */
1568 if (GET_INFO(updatee_bypass) == BLACKHOLE_info
1569 || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
1571 /* Sigh. It has one. Don't lose those threads! */
1572 if (GET_INFO(updatee_keep) == BLACKHOLE_info
1573 || GET_INFO(updatee_keep) == CAF_BLACKHOLE_info
1575 /* Urgh. Two queues. Merge them. */
1576 P_ keep_tso = ((StgBlackHole *)updatee_keep)->blocking_queue;
1578 while (keep_tso->link != END_TSO_QUEUE) {
1579 keep_tso = keep_tso->link;
1581 keep_tso->link = ((StgBlackHole *)updatee_bypass)->blocking_queue;
1584 /* For simplicity, just swap the BQ for the BH */
1585 P_ temp = updatee_keep;
1587 updatee_keep = updatee_bypass;
1588 updatee_bypass = temp;
1590 /* Record the swap in the kept frame (below) */
1591 prev_frame->updatee = updatee_keep;
1596 TICK_UPD_SQUEEZED();
1597 UPD_IND(updatee_bypass, updatee_keep); /* this wakes the threads up */
1599 sp = (P_)frame - 1; /* sp = stuff to slide */
1600 displacement += sizeofW(StgUpdateFrame);
1603 /* No squeeze for this frame */
1604 sp = frame_bottom - 1; /* Keep the current frame */
1606 /* Do lazy black-holing.
1608 if (is_update_frame) {
1609 StgBlackHole *bh = (StgBlackHole *)frame->updatee;
1610 if (bh->header.info != &BLACKHOLE_info
1611 && bh->header.info != &CAF_BLACKHOLE_info
1613 SET_INFO(bh,&BLACKHOLE_info);
1614 bh->blocking_queue = stgCast(StgTSO*,&END_TSO_QUEUE_closure);
1618 /* Fix the link in the current frame (should point to the frame below) */
1619 frame->link = prev_frame;
1620 prev_was_update_frame = is_update_frame;
1623 /* Now slide all words from sp up to the next frame */
1625 if (displacement > 0) {
1626 P_ next_frame_bottom;
1628 if (next_frame != NULL)
1629 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
1631 next_frame_bottom = tso->sp - 1;
1634 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
1638 while (sp >= next_frame_bottom) {
1639 sp[displacement] = *sp;
1643 (P_)prev_frame = (P_)frame + displacement;
1647 tso->sp += displacement;
1648 tso->su = prev_frame;
1651 /* -----------------------------------------------------------------------------
1654 * We have to prepare for GC - this means doing lazy black holing
1655 * here. We also take the opportunity to do stack squeezing if it's
1657 * -------------------------------------------------------------------------- */
/* threadPaused: called whenever a thread returns to the scheduler.
 * Either squeezes the stack (which black-holes as a side effect) when
 * the squeezeUpdFrames flag is set, or just does lazy black-holing.
 */
1660 threadPaused(StgTSO *tso)
1662 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
1663 threadSqueezeStack(tso); /* does black holing too */
1665 threadLazyBlackHole(tso);