X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=06f46f704a56c93beef376539175637d159a163b;hb=4e48260554b72e73932a8d5b7c097a047814ab83;hp=cbb939ea6a74ba27ba8fbcc10b63f72d333acbb0;hpb=ed10f2828652819fadfd4783a612c433361169c3;p=ghc-hetmet.git diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index cbb939e..06f46f7 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -1,5 +1,4 @@ /* ----------------------------------------------------------------------------- - * $Id: GC.c,v 1.158 2003/08/14 15:36:13 simonmar Exp $ * * (c) The GHC Team 1998-2003 * @@ -13,21 +12,21 @@ #include "RtsUtils.h" #include "Apply.h" #include "Storage.h" -#include "StoragePriv.h" +#include "LdvProfile.h" +#include "Updates.h" #include "Stats.h" #include "Schedule.h" -#include "SchedAPI.h" // for ReverCAFs prototype #include "Sanity.h" #include "BlockAlloc.h" #include "MBlock.h" #include "ProfHeap.h" #include "SchedAPI.h" #include "Weak.h" -#include "StablePriv.h" #include "Prelude.h" #include "ParTicky.h" // ToDo: move into Rts.h #include "GCCompact.h" #include "Signals.h" +#include "STM.h" #if defined(GRAN) || defined(PAR) # include "GranSimRts.h" # include "ParallelRts.h" @@ -44,7 +43,6 @@ #endif #include "RetainerProfile.h" -#include "LdvProfile.h" #include @@ -142,11 +140,13 @@ static void mark_root ( StgClosure **root ); // Use a register argument for evacuate, if available. #if __GNUC__ >= 2 -static StgClosure * evacuate (StgClosure *q) __attribute__((regparm(1))); +#define REGPARM1 __attribute__((regparm(1))) #else -static StgClosure * evacuate (StgClosure *q); +#define REGPARM1 #endif +REGPARM1 static StgClosure * evacuate (StgClosure *q); + static void zero_static_object_list ( StgClosure* first_static ); static void zero_mutable_list ( StgMutClosure *first ); @@ -190,31 +190,31 @@ static rtsBool mark_stack_overflowed; static bdescr *oldgen_scan_bd; static StgPtr oldgen_scan; -static inline rtsBool +STATIC_INLINE rtsBool mark_stack_empty(void) { return mark_sp == mark_stack; } -static inline rtsBool +STATIC_INLINE rtsBool mark_stack_full(void) { return mark_sp >= mark_splim; } -static inline void +STATIC_INLINE void reset_mark_stack(void) { mark_sp = mark_stack; } -static inline void +STATIC_INLINE void push_mark_stack(StgPtr p) { *mark_sp++ = p; } -static inline StgPtr +STATIC_INLINE StgPtr pop_mark_stack(void) { return *--mark_sp; @@ -306,7 +306,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) #endif #if defined(DEBUG) && defined(GRAN) - IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n", + IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n", Now, Now)); #endif @@ -315,6 +315,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) blockUserSignals(); #endif + // tell the STM to discard any cached closures its hoping to re-use + stmPreGCHook(); + // tell the stats department that we've started a GC stat_startGC(); @@ -422,7 +425,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // mark the large objects as not evacuated yet for (bd = stp->large_objects; bd; bd = bd->link) { - bd->flags = BF_LARGE; + bd->flags &= ~BF_EVACUATED; } // for a compacted step, we need to allocate the bitmap @@ -439,17 +442,24 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->bitmap = bitmap_bdescr; bitmap = bitmap_bdescr->start; - IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p", + IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p", bitmap_size, bitmap);); // don't forget to fill it with 
zeros! memset(bitmap, 0, bitmap_size); - // for each block in this step, point to its bitmap from the + // For each block in this step, point to its bitmap from the // block descriptor. for (bd=stp->blocks; bd != NULL; bd = bd->link) { bd->u.bitmap = bitmap; bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE); + + // Also at this point we set the BF_COMPACTED flag + // for this block. The invariant is that + // BF_COMPACTED is always unset, except during GC + // when it is set on those blocks which will be + // compacted. + bd->flags |= BF_COMPACTED; } } } @@ -576,17 +586,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) */ markStablePtrTable(mark_root); -#ifdef INTERPRETER - { - /* ToDo: To fix the caf leak, we need to make the commented out - * parts of this code do something sensible - as described in - * the CAF document. - */ - extern void markHugsObjects(void); - markHugsObjects(); - } -#endif - /* ------------------------------------------------------------------------- * Repeatedly scavenge all the areas we know about until there's no * more scavenging to be done. @@ -751,7 +750,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // for a compacted step, just shift the new to-space // onto the front of the now-compacted existing blocks. for (bd = stp->to_blocks; bd != NULL; bd = bd->link) { - bd->flags &= ~BF_EVACUATED; // now from-space + bd->flags &= ~BF_EVACUATED; // now from-space } // tack the new blocks on the end of the existing blocks if (stp->blocks == NULL) { @@ -762,6 +761,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (next == NULL) { bd->link = stp->to_blocks; } + // NB. this step might not be compacted next + // time, so reset the BF_COMPACTED flags. + // They are set before GC if we're going to + // compact. (search for BF_COMPACTED above). 
+ bd->flags &= ~BF_COMPACTED; } } // add the new blocks to the block tally @@ -771,7 +775,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->blocks = stp->to_blocks; stp->n_blocks = stp->n_to_blocks; for (bd = stp->blocks; bd != NULL; bd = bd->link) { - bd->flags &= ~BF_EVACUATED; // now from-space + bd->flags &= ~BF_EVACUATED; // now from-space } } stp->to_blocks = NULL; @@ -849,10 +853,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) oldest_gen->steps[0].n_blocks > (RtsFlags.GcFlags.compactThreshold * max) / 100))) { oldest_gen->steps[0].is_compacted = 1; -// fprintf(stderr,"compaction: on\n", live); +// debugBelch("compaction: on\n", live); } else { oldest_gen->steps[0].is_compacted = 0; -// fprintf(stderr,"compaction: off\n", live); +// debugBelch("compaction: off\n", live); } // if we're going to go over the maximum heap size, reduce the @@ -884,7 +888,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } #if 0 - fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live, + debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live, min_alloc, size, max); #endif @@ -966,7 +970,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) int pc_free; adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); - IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); + IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize; if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ { heapOverflow(); @@ -1194,7 +1198,7 @@ traverse_weak_ptr_list(void) w->link = weak_ptr_list; weak_ptr_list = w; flag = rtsTrue; - IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", + IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p", w, w->key)); continue; } @@ -1236,7 +1240,7 @@ traverse_weak_ptr_list(void) prev = &old_all_threads; for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { - (StgClosure *)tmp = isAlive((StgClosure *)t); + tmp = (StgTSO *)isAlive((StgClosure *)t); if (tmp != NULL) { t = tmp; @@ -1283,7 +1287,7 @@ traverse_weak_ptr_list(void) StgTSO *t, *tmp, *next; for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { next = t->global_link; - (StgClosure *)tmp = evacuate((StgClosure *)t); + tmp = (StgTSO *)evacuate((StgClosure *)t); tmp->global_link = resurrected_threads; resurrected_threads = tmp; } @@ -1294,6 +1298,7 @@ traverse_weak_ptr_list(void) default: barf("traverse_weak_ptr_list"); + return rtsTrue; } } @@ -1321,7 +1326,7 @@ mark_weak_ptr_list ( StgWeak **list ) // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here ASSERT(w->header.info == &stg_DEAD_WEAK_info || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED); - (StgClosure *)w = evacuate((StgClosure *)w); + w = (StgWeak *)evacuate((StgClosure *)w); *last_w = w; last_w = &(w->link); } @@ -1374,7 +1379,7 @@ isAlive(StgClosure *p) } // check the mark bit for compacted steps - if (bd->step->is_compacted && is_marked((P_)p,bd)) { + if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) { return p; } @@ -1413,19 +1418,19 @@ mark_root(StgClosure **root) *root = evacuate(*root); } -static __inline__ void +STATIC_INLINE void upd_evacuee(StgClosure *p, StgClosure *dest) { // Source object must be in from-space: 
ASSERT((Bdescr((P_)p)->flags & BF_EVACUATED) == 0); // not true: (ToDo: perhaps it should be) // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED); - p->header.info = &stg_EVACUATED_info; + SET_INFO(p, &stg_EVACUATED_info); ((StgEvacuated *)p)->evacuee = dest; } -static __inline__ StgClosure * +STATIC_INLINE StgClosure * copy(StgClosure *src, nat size, step *stp) { P_ to, from, dest; @@ -1531,7 +1536,7 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) -------------------------------------------------------------------------- */ -static inline void +STATIC_INLINE void evacuate_large(StgPtr p) { bdescr *bd = Bdescr(p); @@ -1657,7 +1662,7 @@ mkMutCons(StgClosure *ptr, generation *gen) extra reads/writes than we save. -------------------------------------------------------------------------- */ -static StgClosure * +REGPARM1 static StgClosure * evacuate(StgClosure *q) { StgClosure *to; @@ -1699,7 +1704,7 @@ loop: /* If the object is in a step that we're compacting, then we * need to use an alternative evacuate procedure. */ - if (bd->step->is_compacted) { + if (bd->flags & BF_COMPACTED) { if (!is_marked((P_)q,bd)) { mark((P_)q,bd); if (mark_stack_full()) { @@ -1880,6 +1885,9 @@ loop: case UPDATE_FRAME: case STOP_FRAME: case CATCH_FRAME: + case CATCH_STM_FRAME: + case CATCH_RETRY_FRAME: + case ATOMICALLY_FRAME: // shouldn't see these barf("evacuate: stack frame at %p\n", q); @@ -1955,7 +1963,7 @@ loop: //ToDo: derive size etc from reverted IP //to = copy(q,size,stp); IF_DEBUG(gc, - belch("@@ evacuate: RBH %p (%s) to %p (%s)", + debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); return to; } @@ -1964,7 +1972,7 @@ loop: ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE); to = copy(q,sizeofW(StgBlockedFetch),stp); IF_DEBUG(gc, - belch("@@ evacuate: %p (%s) to %p (%s)", + debugBelch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); return to; @@ -1975,7 +1983,7 @@ loop: ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); to = copy(q,sizeofW(StgFetchMe),stp); IF_DEBUG(gc, - belch("@@ evacuate: %p (%s) to %p (%s)", + debugBelch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); return to; @@ -1983,11 +1991,23 @@ loop: ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE); to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp); IF_DEBUG(gc, - belch("@@ evacuate: %p (%s) to %p (%s)", + debugBelch("@@ evacuate: %p (%s) to %p (%s)", q, info_type(q), to, info_type(to))); return to; #endif + case TREC_HEADER: + return copy(q,sizeofW(StgTRecHeader),stp); + + case TVAR_WAIT_QUEUE: + return copy(q,sizeofW(StgTVarWaitQueue),stp); + + case TVAR: + return copy(q,sizeofW(StgTVar),stp); + + case TREC_CHUNK: + return copy(q,sizeofW(StgTRecChunk),stp); + default: barf("evacuate: strange closure type %d", (int)(info->type)); } @@ -2010,6 +2030,22 @@ loop: thunk is unchanged. -------------------------------------------------------------------------- */ +static inline rtsBool +is_to_space ( StgClosure *p ) +{ + bdescr *bd; + + bd = Bdescr((StgPtr)p); + if (HEAP_ALLOCED(p) && + ((bd->flags & BF_EVACUATED) + || ((bd->flags & BF_COMPACTED) && + is_marked((P_)p,bd)))) { + return rtsTrue; + } else { + return rtsFalse; + } +} + static StgClosure * eval_thunk_selector( nat field, StgSelector * p ) { @@ -2042,17 +2078,30 @@ selector_loop: // eval_thunk_selector(). There are various ways this could // happen: // - // - following an IND_STATIC + // 1. 
following an IND_STATIC // - // - when the old generation is compacted, the mark phase updates - // from-space pointers to be to-space pointers, and we can't - // reliably tell which we're following (eg. from an IND_STATIC). + // 2. when the old generation is compacted, the mark phase updates + // from-space pointers to be to-space pointers, and we can't + // reliably tell which we're following (eg. from an IND_STATIC). // - // So we use the block-descriptor test to find out if we're in - // to-space. + // 3. compacting GC again: if we're looking at a constructor in + // the compacted generation, it might point directly to objects + // in to-space. We must bale out here, otherwise doing the selection + // will result in a to-space pointer being returned. + // + // (1) is dealt with using a BF_EVACUATED test on the + // selectee. (2) and (3): we can tell if we're looking at an + // object in the compacted generation that might point to + // to-space objects by testing that (a) it is BF_COMPACTED, (b) + // the compacted generation is being collected, and (c) the + // object is marked. Only a marked object may have pointers that + // point to to-space objects, because that happens when + // scavenging. // - if (HEAP_ALLOCED(selectee) && - Bdescr((StgPtr)selectee)->flags & BF_EVACUATED) { + // The to-space test is now embodied in the in_to_space() inline + // function, as it is re-used below. + // + if (is_to_space(selectee)) { goto bale_out; } @@ -2070,9 +2119,21 @@ selector_loop: ASSERT(field < (StgWord32)(info->layout.payload.ptrs + info->layout.payload.nptrs)); - // ToDo: shouldn't we test whether this pointer is in - // to-space? - return selectee->payload[field]; + // Select the right field from the constructor, and check + // that the result isn't in to-space. It might be in + // to-space if, for example, this constructor contains + // pointers to younger-gen objects (and is on the mut-once + // list). + // + { + StgClosure *q; + q = selectee->payload[field]; + if (is_to_space(q)) { + goto bale_out; + } else { + return q; + } + } case IND: case IND_PERM: @@ -2116,21 +2177,22 @@ selector_loop: // For the purposes of LDV profiling, we have destroyed // the original selector thunk. SET_INFO(p, info_ptr); - LDV_recordDead_FILL_SLOP_DYNAMIC(selectee); + LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee); #endif ((StgInd *)selectee)->indirectee = val; SET_INFO(selectee,&stg_IND_info); -#ifdef PROFILING + // For the purposes of LDV profiling, we have created an // indirection. - LDV_recordCreate(selectee); -#endif + LDV_RECORD_CREATE(selectee); + selectee = val; goto selector_loop; } } case AP: + case AP_STACK: case THUNK: case THUNK_1_0: case THUNK_0_1: @@ -2214,7 +2276,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt ) * srt field in the info table. That's ok, because we'll * never dereference it. 
*/ -static inline void +STATIC_INLINE void scavenge_srt (StgClosure **srt, nat srt_bitmap) { nat bitmap; @@ -2254,31 +2316,31 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) } -static inline void +STATIC_INLINE void scavenge_thunk_srt(const StgInfoTable *info) { StgThunkInfoTable *thunk_info; thunk_info = itbl_to_thunk_itbl(info); - scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_bitmap); + scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap); } -static inline void +STATIC_INLINE void scavenge_fun_srt(const StgInfoTable *info) { StgFunInfoTable *fun_info; fun_info = itbl_to_fun_itbl(info); - scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_bitmap); + scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap); } -static inline void +STATIC_INLINE void scavenge_ret_srt(const StgInfoTable *info) { StgRetInfoTable *ret_info; ret_info = itbl_to_ret_itbl(info); - scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_bitmap); + scavenge_srt((StgClosure **)GET_SRT(ret_info), ret_info->i.srt_bitmap); } /* ----------------------------------------------------------------------------- @@ -2289,7 +2351,7 @@ static void scavengeTSO (StgTSO *tso) { // chase the link field for any TSOs on the same queue - (StgClosure *)tso->link = evacuate((StgClosure *)tso->link); + tso->link = (StgTSO *)evacuate((StgClosure *)tso->link); if ( tso->why_blocked == BlockedOnMVar || tso->why_blocked == BlockedOnBlackHole || tso->why_blocked == BlockedOnException @@ -2305,6 +2367,9 @@ scavengeTSO (StgTSO *tso) (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions); } + // scavange current transaction record + tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec); + // scavenge this thread's stack scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); } @@ -2314,7 +2379,7 @@ scavengeTSO (StgTSO *tso) in PAPs. 
-------------------------------------------------------------------------- */ -static inline StgPtr +STATIC_INLINE StgPtr scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) { StgPtr p; @@ -2322,23 +2387,23 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) nat size; p = (StgPtr)args; - switch (fun_info->fun_type) { + switch (fun_info->f.fun_type) { case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->bitmap); - size = BITMAP_SIZE(fun_info->bitmap); + bitmap = BITMAP_BITS(fun_info->f.bitmap); + size = BITMAP_SIZE(fun_info->f.bitmap); goto small_bitmap; case ARG_GEN_BIG: - size = ((StgLargeBitmap *)fun_info->bitmap)->size; - scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size); + size = GET_FUN_LARGE_BITMAP(fun_info)->size; + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); p += size; break; default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]); - size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]); + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); small_bitmap: while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -2349,7 +2414,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) return p; } -static inline StgPtr +STATIC_INLINE StgPtr scavenge_PAP (StgPAP *pap) { StgPtr p; @@ -2363,12 +2428,12 @@ scavenge_PAP (StgPAP *pap) p = (StgPtr)pap->payload; size = pap->n_args; - switch (fun_info->fun_type) { + switch (fun_info->f.fun_type) { case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->bitmap); + bitmap = BITMAP_BITS(fun_info->f.bitmap); goto small_bitmap; case ARG_GEN_BIG: - scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size); + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); p += size; break; case ARG_BCO: @@ -2376,12 +2441,12 @@ scavenge_PAP (StgPAP *pap) p += size; break; default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]); + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); small_bitmap: size = pap->n_args; while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -2446,9 +2511,9 @@ scavenge(step *stp) { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; recordMutable((StgMutClosure *)mvar); failed_to_evac = rtsFalse; // mutable. 
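
The argument-block and PAP scavengers above share one bitmap convention: a clear bit describes a pointer word, which must be evacuated and updated in place, while a set bit describes a non-pointer word, which is skipped. The following self-contained sketch shows that walk in isolation; every name in it (word_t, visit_fn, walk_small_bitmap) is a stand-in invented for illustration, not RTS code:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t word_t;

    /* Stand-in for evacuate(): maps an old pointer to its new copy. */
    typedef word_t (*visit_fn)(word_t ptr);

    /* Walk `size` words starting at `p`.  Bit i of `bitmap` describes
     * word i: clear = pointer (visit it, store the new address back),
     * set = non-pointer (leave it alone). */
    static word_t *
    walk_small_bitmap(word_t *p, size_t size, word_t bitmap, visit_fn visit)
    {
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                *p = visit(*p);   /* redirect the slot to the new copy */
            }
            p++;
            bitmap >>= 1;
            size--;
        }
        return p;                 /* first word past the scanned block */
    }

The same convention also explains a mechanical change repeated throughout this patch: `(StgClosure *)*p = evacuate(...)` becomes `*p = (StgWord)(StgPtr)evacuate(...)`, because a cast expression is not an lvalue in ISO C, so the store has to go through the word type instead.
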
@@ -2537,7 +2602,7 @@ scavenge(step *stp) end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p += info->layout.payload.nptrs; break; @@ -2545,10 +2610,10 @@ scavenge(step *stp) case BCO: { StgBCO *bco = (StgBCO *)p; - (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs); - (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals); - (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs); - (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls); + bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); + bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); + bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); + bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); p += bco_sizeW(bco); break; } @@ -2562,14 +2627,12 @@ scavenge(step *stp) LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); #endif // - // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? // SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); -#ifdef PROFILING - // @LDV profiling + // We pretend that p has just been created. - LDV_recordCreate((StgClosure *)p); -#endif + LDV_RECORD_CREATE((StgClosure *)p); } // fall through case IND_OLDGEN_PERM: @@ -2607,8 +2670,8 @@ scavenge(step *stp) case BLACKHOLE_BQ: { StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); + bh->blocking_queue = + (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); recordMutable((StgMutClosure *)bh); failed_to_evac = rtsFalse; p += BLACKHOLE_sizeW(); @@ -2652,7 +2715,7 @@ scavenge(step *stp) evac_gen = 0; // repeatedly mutable next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; recordMutable((StgMutClosure *)q); @@ -2665,9 +2728,14 @@ scavenge(step *stp) { StgPtr next; + // Set the mut_link field to NULL, so that we will put this + // array back on the mutable list if it is subsequently thawed + // by unsafeThaw#. + ((StgMutArrPtrs*)p)->mut_link = NULL; + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } // it's tempting to recordMutable() if failed_to_evac is // false, but that breaks some assumptions (eg. every @@ -2702,7 +2770,7 @@ scavenge(step *stp) recordMutable((StgMutClosure *)to); failed_to_evac = rtsFalse; // mutable anyhow. IF_DEBUG(gc, - belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); // ToDo: use size of reverted closure here! 
p += BLACKHOLE_sizeW(); @@ -2723,7 +2791,7 @@ scavenge(step *stp) recordMutable((StgMutClosure *)bf); } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", bf, info_type((StgClosure *)bf), bf->node, info_type(bf->node))); p += sizeofW(StgBlockedFetch); @@ -2747,13 +2815,72 @@ scavenge(step *stp) recordMutable((StgMutClosure *)fmbq); } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s) exciting, isn't it", + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); p += sizeofW(StgFetchMeBlockingQueue); break; } #endif + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)wq); + failed_to_evac = rtsFalse; // mutable + p += sizeofW(StgTVarWaitQueue); + break; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)tvar); + failed_to_evac = rtsFalse; // mutable + p += sizeofW(StgTVar); + break; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)trec); + failed_to_evac = rtsFalse; // mutable + p += sizeofW(StgTRecHeader); + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)tc); + failed_to_evac = rtsFalse; // mutable + p += sizeofW(StgTRecChunk); + break; + } + default: barf("scavenge: unimplemented/strange closure type %d @ %p", info->type, p); @@ -2808,9 +2935,9 @@ linear_scan: { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; failed_to_evac = rtsFalse; // mutable. 
break; @@ -2875,17 +3002,17 @@ linear_scan: end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } break; } case BCO: { StgBCO *bco = (StgBCO *)p; - (StgClosure *)bco->instrs = evacuate((StgClosure *)bco->instrs); - (StgClosure *)bco->literals = evacuate((StgClosure *)bco->literals); - (StgClosure *)bco->ptrs = evacuate((StgClosure *)bco->ptrs); - (StgClosure *)bco->itbls = evacuate((StgClosure *)bco->itbls); + bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); + bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); + bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); + bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); break; } @@ -2927,8 +3054,8 @@ linear_scan: case BLACKHOLE_BQ: { StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); + bh->blocking_queue = + (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); failed_to_evac = rtsFalse; break; } @@ -2963,7 +3090,7 @@ linear_scan: evac_gen = 0; // repeatedly mutable next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; failed_to_evac = rtsFalse; // mutable anyhow. @@ -2975,9 +3102,14 @@ linear_scan: { StgPtr next; + // Set the mut_link field to NULL, so that we will put this + // array on the mutable list if it is subsequently thawed + // by unsafeThaw#. + ((StgMutArrPtrs*)p)->mut_link = NULL; + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } break; } @@ -3001,12 +3133,12 @@ linear_scan: StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); #endif StgRBH *rbh = (StgRBH *)p; - (StgClosure *)rbh->blocking_queue = - evacuate((StgClosure *)rbh->blocking_queue); + bh->blocking_queue = + (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); recordMutable((StgMutClosure *)rbh); failed_to_evac = rtsFalse; // mutable anyhow. 
IF_DEBUG(gc, - belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); break; } @@ -3025,7 +3157,7 @@ linear_scan: recordMutable((StgMutClosure *)bf); } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", bf, info_type((StgClosure *)bf), bf->node, info_type(bf->node))); break; @@ -3047,12 +3179,67 @@ linear_scan: recordMutable((StgMutClosure *)fmbq); } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s) exciting, isn't it", + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); break; } #endif // PAR + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)wq); + failed_to_evac = rtsFalse; // mutable + break; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)tvar); + failed_to_evac = rtsFalse; // mutable + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)tc); + failed_to_evac = rtsFalse; // mutable + break; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + recordMutable((StgMutClosure *)trec); + failed_to_evac = rtsFalse; // mutable + break; + } + default: barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", info->type, p); @@ -3070,7 +3257,7 @@ linear_scan: // start a new linear scan if the mark stack overflowed at some point if (mark_stack_overflowed && oldgen_scan_bd == NULL) { - IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan")); + IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan")); mark_stack_overflowed = rtsFalse; oldgen_scan_bd = oldest_gen->steps[0].blocks; oldgen_scan = oldgen_scan_bd->start; @@ -3154,7 +3341,7 @@ scavenge_one(StgPtr p) end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); + *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); } break; } @@ -3185,7 +3372,7 @@ scavenge_one(StgPtr p) recordMutable((StgMutClosure *)p); next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = 
evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; failed_to_evac = rtsFalse; @@ -3197,9 +3384,14 @@ scavenge_one(StgPtr p) // follow everything StgPtr next; + // Set the mut_link field to NULL, so that we will put this + // array on the mutable list if it is subsequently thawed + // by unsafeThaw#. + ((StgMutArrPtrs*)p)->mut_link = NULL; + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } break; } @@ -3309,7 +3501,7 @@ scavenge_mut_once_list(generation *gen) } else { size = gen->steps[0].scan - start; } - belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); + debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } #endif @@ -3389,7 +3581,7 @@ scavenge_mutable_list(generation *gen) end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); + *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); } continue; } @@ -3402,9 +3594,12 @@ scavenge_mutable_list(generation *gen) evac_gen = gen->no; end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); + *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); } evac_gen = 0; + // Set the mut_link field to NULL, so that we will put this + // array back on the mutable list if it is subsequently thawed + // by unsafeThaw#. p->mut_link = NULL; if (failed_to_evac) { failed_to_evac = rtsFalse; @@ -3422,9 +3617,9 @@ scavenge_mutable_list(generation *gen) case MVAR: { StgMVar *mvar = (StgMVar *)p; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); p->mut_link = gen->mut_list; gen->mut_list = p; continue; @@ -3448,8 +3643,8 @@ scavenge_mutable_list(generation *gen) case BLACKHOLE_BQ: { StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); + bh->blocking_queue = + (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); p->mut_link = gen->mut_list; gen->mut_list = p; continue; @@ -3535,6 +3730,53 @@ scavenge_mutable_list(generation *gen) } #endif + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> 
next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + p->mut_link = gen->mut_list; + gen->mut_list = p; + continue; + } + default: // shouldn't have anything else on the mutables list barf("scavenge_mutable_list: strange object? %d", (int)(info->type)); @@ -3608,7 +3850,7 @@ scavenge_static(void) next = (P_)p->payload + info->layout.payload.ptrs; // evacuate the pointers for (q = (P_)p->payload; q < next; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); + *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); } break; } @@ -3641,7 +3883,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) bitmap = large_bitmap->bitmap[b]; for (i = 0; i < size; ) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } i++; p++; @@ -3654,12 +3896,12 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) } } -static inline StgPtr +STATIC_INLINE StgPtr scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap) { while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -3682,7 +3924,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) StgWord bitmap; nat size; - //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); + //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end)); /* * Each time around this loop, we are looking at a chunk of stack @@ -3701,6 +3943,9 @@ scavenge_stack(StgPtr p, StgPtr stack_end) continue; // small bitmap (< 32 entries, or 64 on a 64-bit machine) + case CATCH_STM_FRAME: + case CATCH_RETRY_FRAME: + case ATOMICALLY_FRAME: case STOP_FRAME: case CATCH_FRAME: case RET_SMALL: @@ -3713,7 +3958,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) p = scavenge_small_bitmap(p, size, bitmap); follow_srt: - scavenge_srt((StgClosure **)info->srt, info->i.srt_bitmap); + scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap); continue; case RET_BCO: { @@ -3721,7 +3966,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) nat size; p++; - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); bco = (StgBCO *)*p; p++; size = BCO_BITMAP_SIZE(bco); @@ -3736,9 +3981,9 @@ scavenge_stack(StgPtr p, StgPtr stack_end) { nat size; - size = info->i.layout.large_bitmap->size; + size = GET_LARGE_BITMAP(&info->i)->size; p++; - scavenge_large_bitmap(p, info->i.layout.large_bitmap, size); + scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size); p += size; // and don't forget to follow the SRT goto follow_srt; @@ -3754,17 +3999,17 @@ scavenge_stack(StgPtr p, StgPtr stack_end) dyn = ((StgRetDyn *)p)->liveness; // traverse the bitmap first - bitmap = GET_LIVENESS(dyn); + bitmap = RET_DYN_LIVENESS(dyn); p = (P_)&((StgRetDyn *)p)->payload[0]; size = RET_DYN_BITMAP_SIZE; p = scavenge_small_bitmap(p, size, bitmap); // skip over the non-ptr words - p += GET_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE; + p += RET_DYN_NONPTRS(dyn) + 
RET_DYN_NONPTR_REGS_SIZE; // follow the ptr words - for (size = GET_PTRS(dyn); size > 0; size--) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + for (size = RET_DYN_PTRS(dyn); size > 0; size--) { + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); p++; } continue; @@ -3874,7 +4119,7 @@ revertCAFs( void ) for (c = (StgIndStatic *)caf_list; c != NULL; c = (StgIndStatic *)c->static_link) { - c->header.info = c->saved_info; + SET_INFO(c, c->saved_info); c->saved_info = NULL; // could, but not necessary: c->static_link = NULL; } @@ -3926,7 +4171,7 @@ gcCAFs(void) ASSERT(info->type == IND_STATIC); if (STATIC_LINK(info,p) == NULL) { - IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p)); + IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p)); // black hole it SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); @@ -3940,7 +4185,7 @@ gcCAFs(void) } - // belch("%d CAFs live", i); + // debugBelch("%d CAFs live", i); } #endif @@ -3987,7 +4232,7 @@ threadLazyBlackHole(StgTSO *tso) if (bh->header.info != &stg_BLACKHOLE_BQ_info && bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - belch("Unexpected lazy BHing required at 0x%04x",(int)bh); + debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh); #endif #ifdef PROFILING // @LDV profiling @@ -3995,11 +4240,9 @@ threadLazyBlackHole(StgTSO *tso) LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); #endif SET_INFO(bh,&stg_BLACKHOLE_info); -#ifdef PROFILING - // @LDV profiling + // We pretend that bh has just been created. - LDV_recordCreate(bh); -#endif + LDV_RECORD_CREATE(bh); } frame = (StgClosure *) ((StgUpdateFrame *)frame + 1); @@ -4010,7 +4253,7 @@ threadLazyBlackHole(StgTSO *tso) // normal stack frames; do nothing except advance the pointer default: - (StgPtr)frame += stack_frame_sizeW(frame); + frame = (StgClosure *)((StgPtr)frame + stack_frame_sizeW(frame)); } } } @@ -4116,7 +4359,7 @@ threadSqueezeStack(StgTSO *tso) bh->header.info != &stg_BLACKHOLE_BQ_info && bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - belch("Unexpected lazy BHing required at 0x%04x",(int)bh); + debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh); #endif #ifdef DEBUG /* zero out the slop so that the sanity checker can tell @@ -4132,8 +4375,8 @@ threadSqueezeStack(StgTSO *tso) * same size as a BLACKHOLE in any case. */ if (bh_info->type != THUNK_SELECTOR) { - for (i = np; i < np + nw; i++) { - ((StgClosure *)bh)->payload[i] = 0; + for (i = 0; i < np + nw; i++) { + ((StgClosure *)bh)->payload[i] = INVALID_OBJECT; } } } @@ -4142,12 +4385,11 @@ threadSqueezeStack(StgTSO *tso) // We pretend that bh is now dead. LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); #endif - // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? SET_INFO(bh,&stg_BLACKHOLE_info); -#ifdef PROFILING + // We pretend that bh has just been created. - LDV_recordCreate(bh); -#endif + LDV_RECORD_CREATE(bh); } prev_was_update_frame = rtsTrue; @@ -4202,19 +4444,19 @@ done_traversing: void *gap_start, *next_gap_start, *gap_end; nat chunk_size; - next_gap_start = (void *)gap + sizeof(StgUpdateFrame); + next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame)); sp = next_gap_start; while ((StgPtr)gap > tso->sp) { // we're working in *bytes* now... 
gap_start = next_gap_start; - gap_end = gap_start - gap->gap_size * sizeof(W_); + gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_)); gap = gap->next_gap; - next_gap_start = (void *)gap + sizeof(StgUpdateFrame); + next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame)); - chunk_size = gap_end - next_gap_start; + chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start; sp -= chunk_size; memmove(sp, next_gap_start, chunk_size); } @@ -4252,12 +4494,12 @@ printMutOnceList(generation *gen) p = gen->mut_once_list; next = p->mut_link; - fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list); + debugBelch("@@ Mut once list %p: ", gen->mut_once_list); for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - fprintf(stderr, "%p (%s), ", + debugBelch("%p (%s), ", p, info_type((StgClosure *)p)); } - fputc('\n', stderr); + debugBelch("\n"); } void @@ -4268,15 +4510,15 @@ printMutableList(generation *gen) p = gen->mut_list; next = p->mut_link; - fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list); + debugBelch("@@ Mutable list %p: ", gen->mut_list); for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - fprintf(stderr, "%p (%s), ", + debugBelch("%p (%s), ", p, info_type((StgClosure *)p)); } - fputc('\n', stderr); + debugBelch("\n"); } -static inline rtsBool +STATIC_INLINE rtsBool maybeLarge(StgClosure *closure) { StgInfoTable *info = get_itbl(closure);
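
The heart of the collector is untouched by this patch: copy() moves a closure into to-space and upd_evacuee() overwrites the original with an EVACUATED forwarding pointer (now written through the SET_INFO macro rather than a raw header store). A minimal sketch of that copy-and-forward protocol, using invented stand-in types rather than the RTS's StgClosure and stg_EVACUATED_info:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    typedef uintptr_t word_t;

    /* Invented stand-ins for StgClosure and its header. */
    typedef struct closure {
        const void *info;          /* info pointer; doubles as the tag  */
        struct closure *evacuee;   /* forwarding pointer once evacuated */
    } closure_t;

    static const char EVACUATED_info;  /* unique address used as a tag */

    /* Copy size_w words of src to the bump pointer *hp, then leave a
     * forwarding pointer behind, as copy()/upd_evacuee() do above. */
    static closure_t *
    copy_and_forward(closure_t *src, size_t size_w, word_t **hp)
    {
        closure_t *dest = (closure_t *)*hp;
        memcpy(dest, src, size_w * sizeof(word_t));
        *hp += size_w;
        src->info    = &EVACUATED_info;  /* src is now "EVACUATED"     */
        src->evacuee = dest;             /* later visitors follow this */
        return dest;
    }

    /* A second evacuation of the same object just follows the pointer. */
    static closure_t *
    follow_if_forwarded(closure_t *src)
    {
        return (src->info == &EVACUATED_info) ? src->evacuee : NULL;
    }
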
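The main semantic change in this patch is the BF_COMPACTED block flag. It is set on a block's descriptor just before GC if that block belongs to a step that will be compacted, tested during GC wherever the code previously consulted bd->step->is_compacted (evacuate(), isAlive(), and the new is_to_space() helper that eval_thunk_selector uses to bail out rather than return a to-space pointer), and cleared again when the step's block lists are rebuilt after GC. A compressed sketch of that life cycle; block_t and the flag values are invented stand-ins for bdescr and the RTS's flag bits:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Invented stand-ins for bdescr and its flag bits. */
    enum { BF_EVACUATED = 1u << 0, BF_COMPACTED = 1u << 1 };
    typedef struct block { uint32_t flags; struct block *link; } block_t;

    /* Before GC: mark every block of a step that will be compacted. */
    static void pre_gc_mark_compacted(block_t *blocks) {
        for (block_t *bd = blocks; bd != NULL; bd = bd->link)
            bd->flags |= BF_COMPACTED;
    }

    /* During GC: "already in to-space" now means either an evacuated
     * block, or a marked object in a block that is being compacted. */
    static bool in_to_space(const block_t *bd, bool object_is_marked) {
        return (bd->flags & BF_EVACUATED) != 0
            || ((bd->flags & BF_COMPACTED) && object_is_marked);
    }

    /* After GC: this step might not be compacted next time, so reset. */
    static void post_gc_clear_compacted(block_t *blocks) {
        for (block_t *bd = blocks; bd != NULL; bd = bd->link)
            bd->flags &= ~BF_COMPACTED;
    }
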
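The casts that appear in threadSqueezeStack's gap-walking loop are a portability fix rather than a behavioural one: ISO C defines pointer arithmetic only on pointers to complete object types, so expressions like `(void *)gap + sizeof(StgUpdateFrame)` compile only as a GNU extension. The patch routes every such computation through unsigned char *. A small sketch of the pattern; gap_t and the helper names are invented for illustration:

    #include <stddef.h>
    #include <string.h>

    /* Invented stand-in for the stack-gap record walked above. */
    typedef struct gap {
        struct gap *next_gap;
        size_t      gap_size;        /* size of the gap, in words */
    } gap_t;

    /* Advance a pointer by a byte count without void * arithmetic. */
    static void *
    bytes_after(void *base, size_t nbytes)
    {
        return (void *)((unsigned char *)base + nbytes);
    }

    /* Slide a chunk of live stack down over a gap.  memmove, not
     * memcpy, because source and destination may overlap, exactly as
     * in the squeezing loop above. */
    static void *
    slide_down(void *sp, void *chunk, size_t chunk_size)
    {
        unsigned char *dst = (unsigned char *)sp - chunk_size;
        memmove(dst, chunk, chunk_size);
        return dst;
    }
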
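Finally, the two macros this patch threads through the file. REGPARM1, defined in the first hunk, asks GCC to pass evacuate()'s single argument in a register instead of on the stack; on other compilers it expands to nothing. STATIC_INLINE replaces the previous mix of `static inline` and `static __inline__` with one spelling chosen centrally by the RTS headers. The REGPARM1 definition below is taken from the patch itself; the STATIC_INLINE definition is an assumption about its likely shape, not the verbatim GHC one:

    /* From the first hunk of this patch. */
    #if __GNUC__ >= 2
    #define REGPARM1 __attribute__((regparm(1)))
    #else
    #define REGPARM1
    #endif

    /* Assumed shape of STATIC_INLINE (really defined in the RTS
     * headers): one central spelling instead of per-site variants. */
    #if defined(__GNUC__)
    #define STATIC_INLINE static inline
    #else
    #define STATIC_INLINE static
    #endif

    /* Usage, as in the hunks above (sketch prototypes only): */
    REGPARM1 static int evac_sketch(int q);
    STATIC_INLINE int twice(int x) { return x + x; }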