X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=41ca61db0f2d222ee53f5a5e1720b978b317f240;hb=daa4d18454157f664a0ff333bb592bb0bd6b4edc;hp=1805d0b4054dd218ff4c96b069b6adcb898b4c54;hpb=fbe64026eac8b559693868f38e6a9e6dad8a8ec6;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index 1805d0b..41ca61d 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -1,5 +1,4 @@
 /* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.148 2003/03/19 18:41:18 sof Exp $
  *
  * (c) The GHC Team 1998-2003
  *
@@ -12,22 +11,23 @@
 #include "RtsFlags.h"
 #include "RtsUtils.h"
 #include "Apply.h"
+#include "OSThreads.h"
 #include "Storage.h"
-#include "StoragePriv.h"
+#include "LdvProfile.h"
+#include "Updates.h"
 #include "Stats.h"
 #include "Schedule.h"
-#include "SchedAPI.h"		// for ReverCAFs prototype
 #include "Sanity.h"
 #include "BlockAlloc.h"
 #include "MBlock.h"
 #include "ProfHeap.h"
 #include "SchedAPI.h"
 #include "Weak.h"
-#include "StablePriv.h"
 #include "Prelude.h"
 #include "ParTicky.h"		// ToDo: move into Rts.h
 #include "GCCompact.h"
 #include "Signals.h"
+#include "STM.h"
 #if defined(GRAN) || defined(PAR)
 # include "GranSimRts.h"
 # include "ParallelRts.h"
@@ -44,10 +44,15 @@
 #endif
 
 #include "RetainerProfile.h"
-#include "LdvProfile.h"
 
 #include <string.h>
 
+// Turn off inlining when debugging - it obfuscates things
+#ifdef DEBUG
+# undef  STATIC_INLINE
+# define STATIC_INLINE static
+#endif
+
 /* STATIC OBJECT LIST.
  *
  * During GC:
@@ -139,9 +144,17 @@ static lnat thunk_selector_depth = 0;
 
 static bdescr *     gc_alloc_block          ( step *stp );
 static void         mark_root               ( StgClosure **root );
-static StgClosure * evacuate                ( StgClosure *q );
+
+// Use a register argument for evacuate, if available.
+#if __GNUC__ >= 2
+#define REGPARM1 __attribute__((regparm(1)))
+#else
+#define REGPARM1
+#endif
+
+REGPARM1 static StgClosure * evacuate (StgClosure *q);
+
 static void         zero_static_object_list ( StgClosure* first_static );
-static void         zero_mutable_list       ( StgMutClosure *first );
 
 static rtsBool      traverse_weak_ptr_list  ( void );
 static void         mark_weak_ptr_list      ( StgWeak **list );
@@ -156,7 +169,6 @@ static rtsBool      scavenge_one            ( StgPtr p );
 static void         scavenge_large          ( step * );
 static void         scavenge_static         ( void );
 static void         scavenge_mutable_list   ( generation *g );
-static void         scavenge_mut_once_list  ( generation *g );
 
 static void         scavenge_large_bitmap   ( StgPtr p, 
 					      StgLargeBitmap *large_bitmap, 
@@ -183,31 +195,31 @@ static rtsBool mark_stack_overflowed;
 static bdescr *oldgen_scan_bd;
 static StgPtr  oldgen_scan;
 
-static inline rtsBool
+STATIC_INLINE rtsBool
 mark_stack_empty(void)
 {
     return mark_sp == mark_stack;
 }
 
-static inline rtsBool
+STATIC_INLINE rtsBool
 mark_stack_full(void)
 {
     return mark_sp >= mark_splim;
 }
 
-static inline void
+STATIC_INLINE void
 reset_mark_stack(void)
 {
     mark_sp = mark_stack;
 }
 
-static inline void
+STATIC_INLINE void
 push_mark_stack(StgPtr p)
 {
     *mark_sp++ = p;
 }
 
-static inline StgPtr
+STATIC_INLINE StgPtr
 pop_mark_stack(void)
 {
     return *--mark_sp;
@@ -258,7 +270,7 @@ gc_alloc_block(step *stp)
    (and all younger generations):
 
      - follow all pointers in the root set.  the root set includes all 
-       mutable objects in all generations (mutable_list and mut_once_list).
+       mutable objects in all generations (mutable_list).
 
      - for each pointer, evacuate the object it points to into either
@@ -270,7 +282,7 @@ gc_alloc_block(step *stp)
 
        When we evacuate an object we attempt to evacuate
        everything it points to into the same generation - this is
       achieved by setting evac_gen to the desired generation.  If
-       we can't do this, then an entry in the mut_once list has to
+       we can't do this, then an entry in the mut list has to
       be made for the cross-generation pointer.
 
     + if the object is already in a generation > N, then leave
@@ -299,15 +311,18 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 #endif
 
 #if defined(DEBUG) && defined(GRAN)
-  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n", 
+  IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n", 
		     Now, Now));
 #endif
 
-#ifndef mingw32_TARGET_OS
+#if defined(RTS_USER_SIGNALS)
   // block signals
   blockUserSignals();
 #endif
 
+  // tell the STM to discard any cached closures it's hoping to re-use
+  stmPreGCHook();
+
   // tell the stats department that we've started a GC 
   stat_startGC();
 
@@ -359,18 +374,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   static_objects = END_OF_STATIC_LIST;
   scavenged_static_objects = END_OF_STATIC_LIST;
 
-  /* zero the mutable list for the oldest generation (see comment by
-   * zero_mutable_list below).
-   */
-  if (major_gc) { 
-    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
-  }
-
   /* Save the old to-space if we're doing a two-space collection
   */
   if (RtsFlags.GcFlags.generations == 1) {
     old_to_blocks = g0s0->to_blocks;
     g0s0->to_blocks = NULL;
+    g0s0->n_to_blocks = 0;
   }
 
   /* Keep a count of how many new blocks we allocated during this GC
@@ -382,8 +391,14 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   // collecting.
   //
   for (g = 0; g <= N; g++) {
-    generations[g].mut_once_list = END_MUT_LIST;
-    generations[g].mut_list = END_MUT_LIST;
+
+    // throw away the mutable list.  Invariant: the mutable list
+    // always has at least one block; this means we can avoid a check for
+    // NULL in recordMutable().
+    if (g != 0) {
+	freeChain(generations[g].mut_list);
+	generations[g].mut_list = allocBlock();
+    }
 
     for (s = 0; s < generations[g].n_steps; s++) {
 
@@ -414,7 +429,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 
	// mark the large objects as not evacuated yet 
	for (bd = stp->large_objects; bd; bd = bd->link) {
-	  bd->flags = BF_LARGE;
+	  bd->flags &= ~BF_EVACUATED;
	}
 
	// for a compacted step, we need to allocate the bitmap
@@ -426,22 +441,29 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
	    bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
 
	    if (bitmap_size > 0) {
-		bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size) 
+		bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size) 
					   / BLOCK_SIZE);
		stp->bitmap = bitmap_bdescr;
		bitmap = bitmap_bdescr->start;
		
-		IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
+		IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
				   bitmap_size, bitmap););
		
		// don't forget to fill it with zeros!
		memset(bitmap, 0, bitmap_size);
		
-		// for each block in this step, point to its bitmap from the
+		// For each block in this step, point to its bitmap from the
		// block descriptor.
		for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		    bd->u.bitmap = bitmap;
		    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+
+		    // Also at this point we set the BF_COMPACTED flag
+		    // for this block.
The invariant is that + // BF_COMPACTED is always unset, except during GC + // when it is set on those blocks which will be + // compacted. + bd->flags |= BF_COMPACTED; } } } @@ -499,23 +521,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) int st; for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { generations[g].saved_mut_list = generations[g].mut_list; - generations[g].mut_list = END_MUT_LIST; - } - - // Do the mut-once lists first - for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { - IF_PAR_DEBUG(verbose, - printMutOnceList(&generations[g])); - scavenge_mut_once_list(&generations[g]); - evac_gen = g; - for (st = generations[g].n_steps-1; st >= 0; st--) { - scavenge(&generations[g].steps[st]); - } + generations[g].mut_list = allocBlock(); + // mut_list always has at least one block. } for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { - IF_PAR_DEBUG(verbose, - printMutableList(&generations[g])); + IF_PAR_DEBUG(verbose, printMutableList(&generations[g])); scavenge_mutable_list(&generations[g]); evac_gen = g; for (st = generations[g].n_steps-1; st >= 0; st--) { @@ -568,17 +579,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) */ markStablePtrTable(mark_root); -#ifdef INTERPRETER - { - /* ToDo: To fix the caf leak, we need to make the commented out - * parts of this code do something sensible - as described in - * the CAF document. - */ - extern void markHugsObjects(void); - markHugsObjects(); - } -#endif - /* ------------------------------------------------------------------------- * Repeatedly scavenge all the areas we know about until there's no * more scavenging to be done. @@ -712,6 +712,14 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) generations[g].collections++; // for stats } + // Count the mutable list as bytes "copied" for the purposes of + // stats. Every mutable list is copied during every GC. + if (g > 0) { + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + copied += (bd->free - bd->start) * sizeof(StgWord); + } + } + for (s = 0; s < generations[g].n_steps; s++) { bdescr *next; stp = &generations[g].steps[s]; @@ -743,7 +751,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // for a compacted step, just shift the new to-space // onto the front of the now-compacted existing blocks. for (bd = stp->to_blocks; bd != NULL; bd = bd->link) { - bd->flags &= ~BF_EVACUATED; // now from-space + bd->flags &= ~BF_EVACUATED; // now from-space } // tack the new blocks on the end of the existing blocks if (stp->blocks == NULL) { @@ -754,6 +762,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) if (next == NULL) { bd->link = stp->to_blocks; } + // NB. this step might not be compacted next + // time, so reset the BF_COMPACTED flags. + // They are set before GC if we're going to + // compact. (search for BF_COMPACTED above). 
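		    // [Illustrative note, not part of the patch: as far as
		    // this file shows, BF_COMPACTED is set only here, for
		    // the blocks of a step with is_compacted, tested by
		    // evacuate() and is_to_space() during GC, and cleared
		    // again after collection when the blocks rejoin ordinary
		    // from-space - which is exactly the stated invariant.]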
+ bd->flags &= ~BF_COMPACTED; } } // add the new blocks to the block tally @@ -763,7 +776,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) stp->blocks = stp->to_blocks; stp->n_blocks = stp->n_to_blocks; for (bd = stp->blocks; bd != NULL; bd = bd->link) { - bd->flags &= ~BF_EVACUATED; // now from-space + bd->flags &= ~BF_EVACUATED; // now from-space } } stp->to_blocks = NULL; @@ -841,10 +854,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) oldest_gen->steps[0].n_blocks > (RtsFlags.GcFlags.compactThreshold * max) / 100))) { oldest_gen->steps[0].is_compacted = 1; -// fprintf(stderr,"compaction: on\n", live); +// debugBelch("compaction: on\n", live); } else { oldest_gen->steps[0].is_compacted = 0; -// fprintf(stderr,"compaction: off\n", live); +// debugBelch("compaction: off\n", live); } // if we're going to go over the maximum heap size, reduce the @@ -876,7 +889,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) } #if 0 - fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live, + debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live, min_alloc, size, max); #endif @@ -958,7 +971,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) int pc_free; adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks); - IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); + IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks)); pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize; if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ { heapOverflow(); @@ -971,7 +984,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) blocks = RtsFlags.GcFlags.minAllocAreaSize; } } - resizeNursery(blocks); + resizeNurseries(blocks); } else { /* Generational collector: @@ -988,7 +1001,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) * percentage of g0s0 that was live at the last minor GC. */ if (N == 0) { - g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks; + g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks(); } /* Estimate a size for the allocation area based on the @@ -1011,12 +1024,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) blocks = RtsFlags.GcFlags.minAllocAreaSize; } - resizeNursery((nat)blocks); + resizeNurseries((nat)blocks); } else { // we might have added extra large blocks to the nursery, so // resize back to minAllocAreaSize again. - resizeNursery(RtsFlags.GcFlags.minAllocAreaSize); + resizeNurseries(RtsFlags.GcFlags.minAllocAreaSize); } } @@ -1080,7 +1093,7 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // ok, GC over: tell the stats department what happened. 
stat_endGC(allocated, collected, live, copied, N); -#ifndef mingw32_TARGET_OS +#if defined(RTS_USER_SIGNALS) // unblock signals again unblockUserSignals(); #endif @@ -1186,7 +1199,7 @@ traverse_weak_ptr_list(void) w->link = weak_ptr_list; weak_ptr_list = w; flag = rtsTrue; - IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", + IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p", w, w->key)); continue; } @@ -1228,7 +1241,7 @@ traverse_weak_ptr_list(void) prev = &old_all_threads; for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { - (StgClosure *)tmp = isAlive((StgClosure *)t); + tmp = (StgTSO *)isAlive((StgClosure *)t); if (tmp != NULL) { t = tmp; @@ -1275,7 +1288,7 @@ traverse_weak_ptr_list(void) StgTSO *t, *tmp, *next; for (t = old_all_threads; t != END_TSO_QUEUE; t = next) { next = t->global_link; - (StgClosure *)tmp = evacuate((StgClosure *)t); + tmp = (StgTSO *)evacuate((StgClosure *)t); tmp->global_link = resurrected_threads; resurrected_threads = tmp; } @@ -1286,6 +1299,7 @@ traverse_weak_ptr_list(void) default: barf("traverse_weak_ptr_list"); + return rtsTrue; } } @@ -1313,7 +1327,7 @@ mark_weak_ptr_list ( StgWeak **list ) // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here ASSERT(w->header.info == &stg_DEAD_WEAK_info || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED); - (StgClosure *)w = evacuate((StgClosure *)w); + w = (StgWeak *)evacuate((StgClosure *)w); *last_w = w; last_w = &(w->link); } @@ -1366,7 +1380,7 @@ isAlive(StgClosure *p) } // check the mark bit for compacted steps - if (bd->step->is_compacted && is_marked((P_)p,bd)) { + if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) { return p; } @@ -1405,19 +1419,17 @@ mark_root(StgClosure **root) *root = evacuate(*root); } -static __inline__ void +STATIC_INLINE void upd_evacuee(StgClosure *p, StgClosure *dest) { - // Source object must be in from-space: - ASSERT((Bdescr((P_)p)->flags & BF_EVACUATED) == 0); // not true: (ToDo: perhaps it should be) // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED); - p->header.info = &stg_EVACUATED_info; + SET_INFO(p, &stg_EVACUATED_info); ((StgEvacuated *)p)->evacuee = dest; } -static __inline__ StgClosure * +STATIC_INLINE StgClosure * copy(StgClosure *src, nat size, step *stp) { P_ to, from, dest; @@ -1523,7 +1535,7 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) -------------------------------------------------------------------------- */ -static inline void +STATIC_INLINE void evacuate_large(StgPtr p) { bdescr *bd = Bdescr(p); @@ -1575,39 +1587,6 @@ evacuate_large(StgPtr p) } /* ----------------------------------------------------------------------------- - Adding a MUT_CONS to an older generation. - - This is necessary from time to time when we end up with an - old-to-new generation pointer in a non-mutable object. We defer - the promotion until the next GC. - -------------------------------------------------------------------------- */ - -static StgClosure * -mkMutCons(StgClosure *ptr, generation *gen) -{ - StgMutVar *q; - step *stp; - - stp = &gen->steps[0]; - - /* chain a new block onto the to-space for the destination step if - * necessary. 
- */ - if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) { - gc_alloc_block(stp); - } - - q = (StgMutVar *)stp->hp; - stp->hp += sizeofW(StgMutVar); - - SET_HDR(q,&stg_MUT_CONS_info,CCS_GC); - q->var = ptr; - recordOldToNewPtrs((StgMutClosure *)q); - - return (StgClosure *)q; -} - -/* ----------------------------------------------------------------------------- Evacuate This is called (eventually) for every live object in the system. @@ -1630,12 +1609,31 @@ mkMutCons(StgClosure *ptr, generation *gen) if M < evac_gen set failed_to_evac flag to indicate that we didn't manage to evacuate this object into evac_gen. + + OPTIMISATION NOTES: + + evacuate() is the single most important function performance-wise + in the GC. Various things have been tried to speed it up, but as + far as I can tell the code generated by gcc 3.2 with -O2 is about + as good as it's going to get. We pass the argument to evacuate() + in a register using the 'regparm' attribute (see the prototype for + evacuate() near the top of this file). + + Changing evacuate() to take an (StgClosure **) rather than + returning the new pointer seems attractive, because we can avoid + writing back the pointer when it hasn't changed (eg. for a static + object, or an object in a generation > N). However, I tried it and + it doesn't help. One reason is that the (StgClosure **) pointer + gets spilled to the stack inside evacuate(), resulting in far more + extra reads/writes than we save. -------------------------------------------------------------------------- */ -static StgClosure * +REGPARM1 static StgClosure * evacuate(StgClosure *q) { +#if defined(PAR) StgClosure *to; +#endif bdescr *bd = NULL; step *stp; const StgInfoTable *info; @@ -1674,7 +1672,7 @@ loop: /* If the object is in a step that we're compacting, then we * need to use an alternative evacuate procedure. */ - if (bd->step->is_compacted) { + if (bd->flags & BF_COMPACTED) { if (!is_marked((P_)q,bd)) { mark((P_)q,bd); if (mark_stack_full()) { @@ -1686,6 +1684,9 @@ loop: return q; } + /* Object is not already evacuated. 
*/ + ASSERT((bd->flags & BF_EVACUATED) == 0); + stp = bd->step->to; } #ifdef DEBUG @@ -1722,8 +1723,10 @@ loop: case CONSTR_1_0: return copy(q,sizeofW(StgHeader)+1,stp); - case THUNK_1_0: // here because of MIN_UPD_SIZE + case THUNK_1_0: case THUNK_0_1: + return copy(q,sizeofW(StgThunk)+1,stp); + case THUNK_1_1: case THUNK_0_2: case THUNK_2_0: @@ -1734,7 +1737,7 @@ loop: stp = bd->step; } #endif - return copy(q,sizeofW(StgHeader)+2,stp); + return copy(q,sizeofW(StgThunk)+2,stp); case FUN_1_1: case FUN_0_2: @@ -1744,27 +1747,27 @@ loop: case CONSTR_2_0: return copy(q,sizeofW(StgHeader)+2,stp); - case FUN: case THUNK: + return copy(q,thunk_sizeW_fromITBL(info),stp); + + case FUN: case CONSTR: case IND_PERM: case IND_OLDGEN_PERM: case WEAK: case FOREIGN: case STABLE_NAME: - case BCO: return copy(q,sizeW_fromITBL(info),stp); + case BCO: + return copy(q,bco_sizeW((StgBCO *)q),stp); + case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: case SE_BLACKHOLE: case BLACKHOLE: return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp); - case BLACKHOLE_BQ: - to = copy(q,BLACKHOLE_sizeW(),stp); - return to; - case THUNK_SELECTOR: { StgClosure *p; @@ -1801,17 +1804,17 @@ loop: goto loop; case THUNK_STATIC: - if (info->srt_len > 0 && major_gc && - THUNK_STATIC_LINK((StgClosure *)q) == NULL) { - THUNK_STATIC_LINK((StgClosure *)q) = static_objects; + if (info->srt_bitmap != 0 && major_gc && + *THUNK_STATIC_LINK((StgClosure *)q) == NULL) { + *THUNK_STATIC_LINK((StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } return q; case FUN_STATIC: - if (info->srt_len > 0 && major_gc && - FUN_STATIC_LINK((StgClosure *)q) == NULL) { - FUN_STATIC_LINK((StgClosure *)q) = static_objects; + if (info->srt_bitmap != 0 && major_gc && + *FUN_STATIC_LINK((StgClosure *)q) == NULL) { + *FUN_STATIC_LINK((StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } return q; @@ -1823,15 +1826,15 @@ loop: */ if (major_gc && ((StgIndStatic *)q)->saved_info == NULL - && IND_STATIC_LINK((StgClosure *)q) == NULL) { - IND_STATIC_LINK((StgClosure *)q) = static_objects; + && *IND_STATIC_LINK((StgClosure *)q) == NULL) { + *IND_STATIC_LINK((StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } return q; case CONSTR_STATIC: - if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) { - STATIC_LINK(info,(StgClosure *)q) = static_objects; + if (major_gc && *STATIC_LINK(info,(StgClosure *)q) == NULL) { + *STATIC_LINK(info,(StgClosure *)q) = static_objects; static_objects = (StgClosure *)q; } return q; @@ -1853,13 +1856,18 @@ loop: case UPDATE_FRAME: case STOP_FRAME: case CATCH_FRAME: + case CATCH_STM_FRAME: + case CATCH_RETRY_FRAME: + case ATOMICALLY_FRAME: // shouldn't see these barf("evacuate: stack frame at %p\n", q); case PAP: - case AP: return copy(q,pap_sizeW((StgPAP*)q),stp); + case AP: + return copy(q,ap_sizeW((StgAP*)q),stp); + case AP_STACK: return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp); @@ -1886,6 +1894,7 @@ loop: case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: // just copy the block return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp); @@ -1904,21 +1913,31 @@ loop: * list it contains. 
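	 * (With the change below, copyPart() copies only the TSO header
	 * eagerly; the explicit loop that follows copies the live part of
	 * the stack, from tso->sp up to the end of the stack, so the
	 * unused space below sp is never copied.)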
	 */
	{
-	    StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
+	    StgTSO *new_tso;
+	    StgPtr p, q;
+
+	    new_tso = (StgTSO *)copyPart((StgClosure *)tso,
+					 tso_sizeW(tso),
+					 sizeofW(StgTSO), stp);
	    move_TSO(tso, new_tso);
+	    for (p = tso->sp, q = new_tso->sp;
+		 p < tso->stack+tso->stack_size;) {
+		*q++ = *p++;
+	    }
+
	    return (StgClosure *)new_tso;
	}
     }
 
 #if defined(PAR)
-  case RBH: // cf. BLACKHOLE_BQ
+  case RBH:
     {
       //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
       to = copy(q,BLACKHOLE_sizeW(),stp); 
       //ToDo: derive size etc from reverted IP
       //to = copy(q,size,stp);
       IF_DEBUG(gc,
-	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
+	       debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
       return to;
     }
 
   case BLOCKED_FETCH:
     ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
     to = copy(q,sizeofW(StgBlockedFetch),stp);
     IF_DEBUG(gc,
-	     belch("@@ evacuate: %p (%s) to %p (%s)",
+	     debugBelch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
     return to;
 
@@ -1938,7 +1957,7 @@ loop:
     ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
     to = copy(q,sizeofW(StgFetchMe),stp);
     IF_DEBUG(gc,
-	     belch("@@ evacuate: %p (%s) to %p (%s)",
+	     debugBelch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
     return to;
 
@@ -1946,11 +1965,23 @@ loop:
     ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
     to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
     IF_DEBUG(gc,
-	     belch("@@ evacuate: %p (%s) to %p (%s)",
+	     debugBelch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
     return to;
 #endif
 
+  case TREC_HEADER: 
+    return copy(q,sizeofW(StgTRecHeader),stp);
+
+  case TVAR_WAIT_QUEUE:
+    return copy(q,sizeofW(StgTVarWaitQueue),stp);
+
+  case TVAR:
+    return copy(q,sizeofW(StgTVar),stp);
+
+  case TREC_CHUNK:
+    return copy(q,sizeofW(StgTRecChunk),stp);
+
   default:
     barf("evacuate: strange closure type %d", (int)(info->type));
   }
 
@@ -1971,8 +2002,66 @@ loop:
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.
+
+  ***
+  ToDo: the treatment of THUNK_SELECTORS could be improved in the
+  following way (from a suggestion by Ian Lynagh):
+
+  We can have a chain like this:
+
+      sel_0 --> (a,b)
+                 |
+                 |-----> sel_0 --> (a,b)
+                                    |
+                                    |-----> sel_0 --> ...
+
+  and the depth limit means we don't go all the way to the end of the
+  chain, which results in a space leak.  This affects the recursive
+  call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
+  the recursive call to eval_thunk_selector() in
+  eval_thunk_selector().
+
+  We could eliminate the depth bound in this case, in the following
+  way:
+
+      - traverse the chain once to discover the *value* of the 
+        THUNK_SELECTOR.  Mark all THUNK_SELECTORS that we
+        visit on the way as having been visited already (somehow).
+
+      - in a second pass, traverse the chain again updating all
+        THUNK_SELECTORS that we find on the way with indirections to
+        the value.
+
+      - if we encounter a "marked" THUNK_SELECTOR in a normal 
+        evacuate(), we know it can't be updated so just evac it.
+
+  Program that illustrates the problem:
+
+	foo [] = ([], [])
+	foo (x:xs) = let (ys, zs) = foo xs
		     in if x >= 0 then (x:ys, zs) else (ys, x:zs)
+
+	main = bar [1..(100000000::Int)]
+	bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)
+
   -------------------------------------------------------------------------- */
 
+static inline rtsBool
+is_to_space ( StgClosure *p )
+{
+    bdescr *bd;
+
+    bd = Bdescr((StgPtr)p);
+    if (HEAP_ALLOCED(p) &&
+	((bd->flags & BF_EVACUATED) 
+	 || ((bd->flags & BF_COMPACTED) &&
+	     is_marked((P_)p,bd)))) {
+	return rtsTrue;
+    } else {
+	return rtsFalse;
+    }
+}
+
 static StgClosure *
 eval_thunk_selector( nat field, StgSelector * p )
 {
@@ -2000,6 +2089,38 @@ eval_thunk_selector( nat field, StgSelector * p )
 
 selector_loop:
 
+    // We don't want to end up in to-space, because this causes
+    // problems when the GC later tries to evacuate the result of
+    // eval_thunk_selector().  There are various ways this could 
+    // happen:
+    //
+    // 1. following an IND_STATIC
+    //
+    // 2. when the old generation is compacted, the mark phase updates
+    //    from-space pointers to be to-space pointers, and we can't
+    //    reliably tell which we're following (eg. from an IND_STATIC).
+    // 
+    // 3. compacting GC again: if we're looking at a constructor in
+    //    the compacted generation, it might point directly to objects
+    //    in to-space.  We must bale out here, otherwise doing the selection
+    //    will result in a to-space pointer being returned.
+    //
+    //  (1) is dealt with using a BF_EVACUATED test on the
+    //  selectee. (2) and (3): we can tell if we're looking at an
+    //  object in the compacted generation that might point to
+    //  to-space objects by testing that (a) it is BF_COMPACTED, (b)
+    //  the compacted generation is being collected, and (c) the
+    //  object is marked.  Only a marked object may have pointers that
+    //  point to to-space objects, because that happens when
+    //  scavenging.
+    //
+    //  The to-space test is now embodied in the is_to_space() inline
+    //  function, as it is re-used below.
+    //
+    if (is_to_space(selectee)) {
+	goto bale_out;
+    }
+
     info = get_itbl(selectee);
     switch (info->type) {
       case CONSTR:
@@ -2014,12 +2135,27 @@ selector_loop:
	      ASSERT(field <  (StgWord32)(info->layout.payload.ptrs + 
				          info->layout.payload.nptrs));
	  
-	  return selectee->payload[field];
+	  // Select the right field from the constructor, and check
+	  // that the result isn't in to-space.  It might be in
+	  // to-space if, for example, this constructor contains
+	  // pointers to younger-gen objects (and is on the mut-once
+	  // list).
+	  //
+	  { 
+	      StgClosure *q;
+	      q = selectee->payload[field];
+	      if (is_to_space(q)) {
+		  goto bale_out;
+	      } else {
+		  return q;
+	      }
	  }
 
       case IND:
       case IND_PERM:
       case IND_OLDGEN:
       case IND_OLDGEN_PERM:
+      case IND_STATIC:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;
 
@@ -2029,21 +2165,16 @@ selector_loop:
	  // leaks by evaluating this selector thunk anyhow.
	  break;
 
-      case IND_STATIC:
-	  // We can't easily tell whether the indirectee is into 
-	  // from or to-space, so just bail out here.
-	  break;
-
       case THUNK_SELECTOR:
       {
	  StgClosure *val;

	  // check that we don't recurse too much, re-using the
	  // depth bound also used in evacuate().
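	  // (Note on the reordering below: the bound is now checked
	  // *before* the increment, so thunk_selector_depth is only
	  // bumped when we actually make the recursive call; the
	  // effective depth limit is unchanged.)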
-	  thunk_selector_depth++;
-	  if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
+	  if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
	      break;
	  }
+	  thunk_selector_depth++;

	  val = eval_thunk_selector(info->layout.selector_offset, 
				    (StgSelector *)selectee);
@@ -2062,21 +2193,22 @@ selector_loop:
	      // For the purposes of LDV profiling, we have destroyed
	      // the original selector thunk.
	      SET_INFO(p, info_ptr);
-	      LDV_recordDead_FILL_SLOP_DYNAMIC(selectee);
+	      LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
 #endif
	      ((StgInd *)selectee)->indirectee = val;
	      SET_INFO(selectee,&stg_IND_info);
-#ifdef PROFILING
+
	      // For the purposes of LDV profiling, we have created an
	      // indirection.
-	      LDV_recordCreate(selectee);
-#endif
+	      LDV_RECORD_CREATE(selectee);
+
	      selectee = val;
	      goto selector_loop;
	  }
      }
 
      case AP:
+      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
@@ -2088,7 +2220,6 @@ selector_loop:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
-      case BLACKHOLE_BQ:
 #if defined(PAR)
      case RBH:
      case BLOCKED_FETCH:
@@ -2106,6 +2237,7 @@ selector_loop:
	     (int)(info->type));
    }
 
+bale_out:
    // We didn't manage to evaluate this thunk; restore the old info pointer
    SET_INFO(p, info_ptr);
    return NULL;
@@ -2121,70 +2253,100 @@ move_TSO (StgTSO *src, StgTSO *dest)
 {
    ptrdiff_t diff;
 
-    // relocate the stack pointers... 
+    // relocate the stack pointer... 
    diff = (StgPtr)dest - (StgPtr)src; // In *words* 
    dest->sp = (StgPtr)dest->sp + diff;
 }
 
-/* evacuate the SRT.  If srt_len is zero, then there isn't an
+/* Similar to scavenge_large_bitmap(), but we don't write back the
+ * pointers we get back from evacuate().
+ */
+static void
+scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
+{
+    nat i, b, size;
+    StgWord bitmap;
+    StgClosure **p;
+    
+    b = 0;
+    bitmap = large_srt->l.bitmap[b];
+    size   = (nat)large_srt->l.size;
+    p      = (StgClosure **)large_srt->srt;
+    for (i = 0; i < size; ) {
+	if ((bitmap & 1) != 0) {
+	    evacuate(*p);
+	}
+	i++;
+	p++;
+	if (i % BITS_IN(W_) == 0) {
+	    b++;
+	    bitmap = large_srt->l.bitmap[b];
+	} else {
+	    bitmap = bitmap >> 1;
+	}
+    }
+}

+/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
-static inline void
-scavenge_srt (StgClosure **srt, nat srt_len)
+STATIC_INLINE void
+scavenge_srt (StgClosure **srt, nat srt_bitmap)
 {
-  StgClosure **srt_end;
+  nat bitmap;
+  StgClosure **p;
 
-  srt_end = srt + srt_len;
+  bitmap = srt_bitmap;
+  p = srt;
 
-  for (; srt < srt_end; srt++) {
-    /* Special-case to handle references to closures hiding out in DLLs, since
-       double indirections required to get at those. The code generator knows
-       which is which when generating the SRT, so it stores the (indirect)
-       reference to the DLL closure in the table by first adding one to it.
-       We check for this here, and undo the addition before evacuating it.
-
-       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
-       closure that's fixed at link-time, and no extra magic is required.
-    */
+  if (bitmap == (StgHalfWord)(-1)) {  
+      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
+      return;
+  }

+  while (bitmap != 0) {
+      if ((bitmap & 1) != 0) {
 #ifdef ENABLE_WIN32_DLL_SUPPORT
-    if ( (unsigned long)(*srt) & 0x1 ) {
-       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
-    } else {
-       evacuate(*srt);
-    }
+	  // Special-case to handle references to closures hiding out in DLLs, since
+	  // double indirections are required to get at those.  The code generator knows
+	  // which is which when generating the SRT, so it stores the (indirect)
+	  // reference to the DLL closure in the table by first adding one to it.
+	  // We check for this here, and undo the addition before evacuating it.
+	  // 
+	  // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
+	  // closure that's fixed at link-time, and no extra magic is required.
+	  if ( (unsigned long)(*srt) & 0x1 ) {
+	      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+	  } else {
+	      evacuate(*p);
+	  }
 #else
-    evacuate(*srt);
+	  evacuate(*p);
 #endif
+      }
+      p++;
+      bitmap = bitmap >> 1;
  }
 }
 
-static inline void
+STATIC_INLINE void
 scavenge_thunk_srt(const StgInfoTable *info)
 {
    StgThunkInfoTable *thunk_info;
 
    thunk_info = itbl_to_thunk_itbl(info);
-    scavenge_srt((StgClosure **)thunk_info->srt, thunk_info->i.srt_len);
+    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
 }
 
-static inline void
+STATIC_INLINE void
 scavenge_fun_srt(const StgInfoTable *info)
 {
    StgFunInfoTable *fun_info;
 
    fun_info = itbl_to_fun_itbl(info);
-    scavenge_srt((StgClosure **)fun_info->srt, fun_info->i.srt_len);
-}
-
-static inline void
-scavenge_ret_srt(const StgInfoTable *info)
-{
-    StgRetInfoTable *ret_info;
-
-    ret_info = itbl_to_ret_itbl(info);
-    scavenge_srt((StgClosure **)ret_info->srt, ret_info->i.srt_len);
+    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
 }
 
 /* -----------------------------------------------------------------------------
@@ -2195,7 +2357,7 @@ static void
 scavengeTSO (StgTSO *tso)
 {
    // chase the link field for any TSOs on the same queue 
-    (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
+    tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
    if (   tso->why_blocked == BlockedOnMVar
	|| tso->why_blocked == BlockedOnBlackHole
	|| tso->why_blocked == BlockedOnException
@@ -2211,6 +2373,9 @@ scavengeTSO (StgTSO *tso)
	    (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }
 
+    // scavenge current transaction record
+    tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
+    
    // scavenge this thread's stack 
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
 }
 
@@ -2220,7 +2385,7 @@ scavengeTSO (StgTSO *tso)
   in PAPs.
-------------------------------------------------------------------------- */ -static inline StgPtr +STATIC_INLINE StgPtr scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) { StgPtr p; @@ -2228,23 +2393,23 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) nat size; p = (StgPtr)args; - switch (fun_info->fun_type) { + switch (fun_info->f.fun_type) { case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->bitmap); - size = BITMAP_SIZE(fun_info->bitmap); + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); + size = BITMAP_SIZE(fun_info->f.b.bitmap); goto small_bitmap; case ARG_GEN_BIG: - size = ((StgLargeBitmap *)fun_info->bitmap)->size; - scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size); + size = GET_FUN_LARGE_BITMAP(fun_info)->size; + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); p += size; break; default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]); - size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->fun_type]); + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); small_bitmap: while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -2255,39 +2420,35 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) return p; } -static inline StgPtr -scavenge_PAP (StgPAP *pap) +STATIC_INLINE StgPtr +scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) { StgPtr p; - StgWord bitmap, size; + StgWord bitmap; StgFunInfoTable *fun_info; - - pap->fun = evacuate(pap->fun); - fun_info = get_fun_itbl(pap->fun); + + fun_info = get_fun_itbl(fun); ASSERT(fun_info->i.type != PAP); + p = (StgPtr)payload; - p = (StgPtr)pap->payload; - size = pap->n_args; - - switch (fun_info->fun_type) { + switch (fun_info->f.fun_type) { case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->bitmap); + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); goto small_bitmap; case ARG_GEN_BIG: - scavenge_large_bitmap(p, (StgLargeBitmap *)fun_info->bitmap, size); + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); p += size; break; case ARG_BCO: - scavenge_large_bitmap((StgPtr)pap->payload, BCO_BITMAP(pap->fun), size); + scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size); p += size; break; default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->fun_type]); + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); small_bitmap: - size = pap->n_args; while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -2298,6 +2459,20 @@ scavenge_PAP (StgPAP *pap) return p; } +STATIC_INLINE StgPtr +scavenge_PAP (StgPAP *pap) +{ + pap->fun = evacuate(pap->fun); + return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); +} + +STATIC_INLINE StgPtr +scavenge_AP (StgAP *ap) +{ + ap->fun = evacuate(ap->fun); + return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); +} + /* ----------------------------------------------------------------------------- Scavenge a given step until there are no more objects in this step to scavenge. @@ -2344,20 +2519,16 @@ scavenge(step *stp) q = p; switch (info->type) { - + case MVAR: - /* treat MVars specially, because we don't want to evacuate the - * mut_link field in the middle of the closure. 
- */ { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)mvar); - failed_to_evac = rtsFalse; // mutable. + failed_to_evac = rtsTrue; // mutable. p += sizeofW(StgMVar); break; } @@ -2371,6 +2542,11 @@ scavenge(step *stp) case THUNK_2_0: scavenge_thunk_srt(info); + ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]); + ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; + break; + case CONSTR_2_0: ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); @@ -2379,8 +2555,8 @@ scavenge(step *stp) case THUNK_1_0: scavenge_thunk_srt(info); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 1; break; case FUN_1_0: @@ -2392,7 +2568,7 @@ scavenge(step *stp) case THUNK_0_1: scavenge_thunk_srt(info); - p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + p += sizeofW(StgThunk) + 1; break; case FUN_0_1: @@ -2403,7 +2579,7 @@ scavenge(step *stp) case THUNK_0_2: scavenge_thunk_srt(info); - p += sizeofW(StgHeader) + 2; + p += sizeofW(StgThunk) + 2; break; case FUN_0_2: @@ -2414,8 +2590,8 @@ scavenge(step *stp) case THUNK_1_1: scavenge_thunk_srt(info); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; + ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; break; case FUN_1_1: @@ -2430,26 +2606,44 @@ scavenge(step *stp) goto gen_obj; case THUNK: + { + StgPtr end; + scavenge_thunk_srt(info); - // fall through + end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + } + p += info->layout.payload.nptrs; + break; + } gen_obj: case CONSTR: case WEAK: case FOREIGN: case STABLE_NAME: - case BCO: { StgPtr end; end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p += info->layout.payload.nptrs; break; } + case BCO: { + StgBCO *bco = (StgBCO *)p; + bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); + bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); + bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); + bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); + p += bco_sizeW(bco); + break; + } + case IND_PERM: if (stp->gen->no != 0) { #ifdef PROFILING @@ -2459,38 +2653,24 @@ scavenge(step *stp) LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); #endif // - // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? // SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); -#ifdef PROFILING - // @LDV profiling + // We pretend that p has just been created. 
- LDV_recordCreate((StgClosure *)p); -#endif + LDV_RECORD_CREATE((StgClosure *)p); } // fall through case IND_OLDGEN_PERM: - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordOldToNewPtrs((StgMutClosure *)p); - } - p += sizeofW(StgIndOldGen); + ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee); + p += sizeofW(StgInd); break; case MUT_VAR: evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)p); - failed_to_evac = rtsFalse; // mutable anyhow - p += sizeofW(StgMutVar); - break; - - case MUT_CONS: - // ignore these - failed_to_evac = rtsFalse; // mutable anyhow + failed_to_evac = rtsTrue; // mutable anyhow p += sizeofW(StgMutVar); break; @@ -2501,17 +2681,6 @@ scavenge(step *stp) p += BLACKHOLE_sizeW(); break; - case BLACKHOLE_BQ: - { - StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); - recordMutable((StgMutClosure *)bh); - failed_to_evac = rtsFalse; - p += BLACKHOLE_sizeW(); - break; - } - case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; @@ -2532,10 +2701,13 @@ scavenge(step *stp) } case PAP: - case AP: p = scavenge_PAP((StgPAP *)p); break; + case AP: + p = scavenge_AP((StgAP *)p); + break; + case ARR_WORDS: // nothing to follow p += arr_words_sizeW((StgArrWords *)p); @@ -2549,22 +2721,22 @@ scavenge(step *stp) evac_gen = 0; // repeatedly mutable next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)q); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. break; } case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: // follow everything { StgPtr next; next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } // it's tempting to recordMutable() if failed_to_evac is // false, but that breaks some assumptions (eg. every @@ -2579,14 +2751,13 @@ scavenge(step *stp) evac_gen = 0; scavengeTSO(tso); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tso); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. p += tso_sizeW(tso); break; } #if defined(PAR) - case RBH: // cf. BLACKHOLE_BQ + case RBH: { #if 0 nat size, ptrs, nonptrs, vhs; @@ -2596,10 +2767,9 @@ scavenge(step *stp) StgRBH *rbh = (StgRBH *)p; (StgClosure *)rbh->blocking_queue = evacuate((StgClosure *)rbh->blocking_queue); - recordMutable((StgMutClosure *)to); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. IF_DEBUG(gc, - belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); // ToDo: use size of reverted closure here! 
p += BLACKHOLE_sizeW(); @@ -2615,12 +2785,8 @@ scavenge(step *stp) // follow the link to the rest of the blocking queue (StgClosure *)bf->link = evacuate((StgClosure *)bf->link); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); - } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", bf, info_type((StgClosure *)bf), bf->node, info_type(bf->node))); p += sizeofW(StgBlockedFetch); @@ -2634,35 +2800,89 @@ scavenge(step *stp) p += sizeofW(StgFetchMe); break; // nothing to do in this case - case FETCH_ME_BQ: // cf. BLACKHOLE_BQ + case FETCH_ME_BQ: { StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; (StgClosure *)fmbq->blocking_queue = evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); - } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s) exciting, isn't it", + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); p += sizeofW(StgFetchMeBlockingQueue); break; } #endif + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTVarWaitQueue); + break; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTVar); + break; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTRecHeader); + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTRecChunk); + break; + } + default: barf("scavenge: unimplemented/strange closure type %d @ %p", info->type, p); } - /* If we didn't manage to promote all the objects pointed to by - * the current object, then we have to designate this object as - * mutable (because it contains old-to-new generation pointers). + /* + * We need to record the current object on the mutable list if + * (a) It is actually mutable, or + * (b) It contains pointers to a younger generation. + * Case (b) arises if we didn't manage to promote everything that + * the current object points to into the current generation. 
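     * An example of case (b): a constructor in generation 2 that still
     * holds a pointer to a generation-0 object after scavenging must go
     * on generation 2's mutable list, so that the next minor GC still
     * sees the old-to-new pointer and treats it as a root.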
*/ if (failed_to_evac) { failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)q, &generations[evac_gen]); + recordMutableGen((StgClosure *)q, stp->gen); } } @@ -2699,17 +2919,14 @@ linear_scan: switch (info->type) { case MVAR: - /* treat MVars specially, because we don't want to evacuate the - * mut_link field in the middle of the closure. - */ { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; // mutable. + failed_to_evac = rtsTrue; // mutable. break; } @@ -2721,6 +2938,10 @@ linear_scan: case THUNK_2_0: scavenge_thunk_srt(info); + ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]); + ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + break; + case CONSTR_2_0: ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); @@ -2735,6 +2956,9 @@ linear_scan: case THUNK_1_0: case THUNK_1_1: scavenge_thunk_srt(info); + ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + break; + case CONSTR_1_0: case CONSTR_1_1: ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); @@ -2759,25 +2983,41 @@ linear_scan: goto gen_obj; case THUNK: + { + StgPtr end; + scavenge_thunk_srt(info); - // fall through + end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + } + break; + } gen_obj: case CONSTR: case WEAK: case FOREIGN: case STABLE_NAME: - case BCO: { StgPtr end; end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } break; } + case BCO: { + StgBCO *bco = (StgBCO *)p; + bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); + bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); + bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); + bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); + break; + } + case IND_PERM: // don't need to do anything here: the only possible case // is that we're in a 1-space compacting collector, with @@ -2786,24 +3026,15 @@ linear_scan: case IND_OLDGEN: case IND_OLDGEN_PERM: - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - if (failed_to_evac) { - recordOldToNewPtrs((StgMutClosure *)p); - } - failed_to_evac = rtsFalse; + ((StgInd *)p)->indirectee = + evacuate(((StgInd *)p)->indirectee); break; case MUT_VAR: evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; - break; - - case MUT_CONS: - // ignore these - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; case CAF_BLACKHOLE: @@ -2813,16 +3044,7 @@ linear_scan: case ARR_WORDS: break; - case BLACKHOLE_BQ: - { - StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); - failed_to_evac = rtsFalse; - break; - } - - case THUNK_SELECTOR: + 
case THUNK_SELECTOR:
	{ 
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}

	case PAP:
-	case AP:
	    scavenge_PAP((StgPAP *)p);
	    break;
+
+	case AP:
+	    scavenge_AP((StgAP *)p);
+	    break;
      
	case MUT_ARR_PTRS:
	    // follow everything 
	{
	    StgPtr next;
	    
	    evac_gen = 0;		// repeatedly mutable 
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-		(StgClosure *)*p = evacuate((StgClosure *)*p);
+		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
-	    failed_to_evac = rtsFalse; // mutable anyhow.
+	    failed_to_evac = rtsTrue; // mutable anyhow.
	    break;
	}

	case MUT_ARR_PTRS_FROZEN:
+	case MUT_ARR_PTRS_FROZEN0:
	    // follow everything 
	{
	    StgPtr next;
	    
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-		(StgClosure *)*p = evacuate((StgClosure *)*p);
+		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    break;
	}

	case TSO:
	{ 
	    StgTSO *tso = (StgTSO *)p;
	    
	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
-	    failed_to_evac = rtsFalse;
+	    failed_to_evac = rtsTrue;
	    break;
	}

#if defined(PAR)
-	case RBH: // cf. BLACKHOLE_BQ
+	case RBH: 
	{ 
#if 0
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	    StgRBH *rbh = (StgRBH *)p;
-	    (StgClosure *)rbh->blocking_queue = 
-		evacuate((StgClosure *)rbh->blocking_queue);
-	    recordMutable((StgMutClosure *)rbh);
-	    failed_to_evac = rtsFalse;  // mutable anyhow.
+	    rbh->blocking_queue = 
+		(StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
+	    failed_to_evac = rtsTrue;  // mutable anyhow.
	    IF_DEBUG(gc,
-		     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+		     debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			   p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    break;
	}
	
	case BLOCKED_FETCH:
	{ 
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded 
	    (StgClosure *)bf->node = 
		evacuate((StgClosure *)bf->node);
@@ -2909,12 +3134,8 @@ linear_scan:
	    // follow the link to the rest of the blocking queue 
	    (StgClosure *)bf->link = 
		evacuate((StgClosure *)bf->link);
-	    if (failed_to_evac) {
-		failed_to_evac = rtsFalse;
-		recordMutable((StgMutClosure *)bf);
-	    }
	    IF_DEBUG(gc,
-		     belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+		     debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			   bf, info_type((StgClosure *)bf), 
			   bf->node, info_type(bf->node)));
	    break;
@@ -2926,21 +3147,68 @@ linear_scan:
	case FETCH_ME:
	    break; // nothing to do in this case

-	case FETCH_ME_BQ: // cf.
BLACKHOLE_BQ + case FETCH_ME_BQ: { StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; (StgClosure *)fmbq->blocking_queue = evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); - } IF_DEBUG(gc, - belch("@@ scavenge: %p (%s) exciting, isn't it", + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); break; } -#endif // PAR +#endif /* PAR */ + + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } default: barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", @@ -2949,7 +3217,7 @@ linear_scan: if (failed_to_evac) { failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)q, &generations[evac_gen]); + recordMutableGen((StgClosure *)q, &generations[evac_gen]); } // mark the next bit to indicate "scavenged" @@ -2959,7 +3227,7 @@ linear_scan: // start a new linear scan if the mark stack overflowed at some point if (mark_stack_overflowed && oldgen_scan_bd == NULL) { - IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan")); + IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan")); mark_stack_overflowed = rtsFalse; oldgen_scan_bd = oldest_gen->steps[0].blocks; oldgen_scan = oldgen_scan_bd->start; @@ -3016,18 +3284,40 @@ scavenge_one(StgPtr p) switch (info->type) { - case FUN: - case FUN_1_0: // hardly worth specialising these guys - case FUN_0_1: - case FUN_1_1: - case FUN_0_2: - case FUN_2_0: + case MVAR: + { + StgMVar *mvar = ((StgMVar *)p); + evac_gen = 0; + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable. 
+	break;
+    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
+    {
+	StgPtr q, end;
+	
+	end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
+	for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
+	    *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+	}
+	break;
+    }

+    case FUN:
+    case FUN_1_0:			// hardly worth specialising these guys
+    case FUN_0_1:
+    case FUN_1_1:
+    case FUN_0_2:
+    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
@@ -3037,17 +3327,23 @@ scavenge_one(StgPtr p)
    case WEAK:
    case FOREIGN:
    case IND_PERM:
-    case IND_OLDGEN_PERM:
    {
	StgPtr q, end;
	
	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
-	    (StgClosure *)*q = evacuate((StgClosure *)*q);
+	    *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
	}
	break;
    }
    
+    case MUT_VAR:
+	evac_gen = 0;
+	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+	evac_gen = saved_evac_gen;
+	failed_to_evac = rtsTrue; // mutable anyhow
+	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
@@ -3061,6 +3357,24 @@ scavenge_one(StgPtr p)
	break;
    }

+    case AP_STACK:
+    {
+	StgAP_STACK *ap = (StgAP_STACK *)p;
+
+	ap->fun = evacuate(ap->fun);
+	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+	p = (StgPtr)ap->payload + ap->size;
+	break;
+    }

+    case PAP:
+	p = scavenge_PAP((StgPAP *)p);
+	break;

+    case AP:
+	p = scavenge_AP((StgAP *)p);
+	break;

    case ARR_WORDS:
	// nothing to follow 
	break;
@@ -3071,24 +3385,24 @@ scavenge_one(StgPtr p)
	StgPtr next;
	
	evac_gen = 0;		// repeatedly mutable 
-	recordMutable((StgMutClosure *)p);
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-	    (StgClosure *)*p = evacuate((StgClosure *)*p);
+	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
-	failed_to_evac = rtsFalse;
+	failed_to_evac = rtsTrue;
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
+    case MUT_ARR_PTRS_FROZEN0:
    {
	// follow everything 
	StgPtr next;
	
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
-	    (StgClosure *)*p = evacuate((StgClosure *)*p);
+	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	break;
    }
@@ -3099,83 +3413,133 @@ scavenge_one(StgPtr p)
	evac_gen = 0;		// repeatedly mutable 
	scavengeTSO(tso);
-	recordMutable((StgMutClosure *)tso);
	evac_gen = saved_evac_gen;
-	failed_to_evac = rtsFalse;
+	failed_to_evac = rtsTrue;
	break;
    }

-    case AP_STACK:
-    {
-	StgAP_STACK *ap = (StgAP_STACK *)p;
-
-	ap->fun = evacuate(ap->fun);
-	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
-	p = (StgPtr)ap->payload + ap->size;
+#if defined(PAR)
+    case RBH:
+    { 
+#if 0
+	nat size, ptrs, nonptrs, vhs;
+	char str[80];
+	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+#endif
+	StgRBH *rbh = (StgRBH *)p;
+	(StgClosure *)rbh->blocking_queue = 
+	    evacuate((StgClosure *)rbh->blocking_queue);
+	failed_to_evac = rtsTrue;  // mutable anyhow.
+	IF_DEBUG(gc,
+		 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
+	// ToDo: use size of reverted closure here!
	break;
    }

-    case PAP:
-    case AP:
-	p = scavenge_PAP((StgPAP *)p);
-	break;
-
-    case IND_OLDGEN:
-	// This might happen if for instance a MUT_CONS was pointing to a
-	// THUNK which has since been updated.
The IND_OLDGEN will - // be on the mutable list anyway, so we don't need to do anything - // here. + case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + // follow the pointer to the node which is being demanded + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + // follow the link to the rest of the blocking queue + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + IF_DEBUG(gc, + debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); break; + } - default: - barf("scavenge_one: strange object %d", (int)(info->type)); - } - - no_luck = failed_to_evac; - failed_to_evac = rtsFalse; - return (no_luck); -} - -/* ----------------------------------------------------------------------------- - Scavenging mutable lists. +#ifdef DIST + case REMOTE_REF: +#endif + case FETCH_ME: + break; // nothing to do in this case - We treat the mutable list of each generation > N (i.e. all the - generations older than the one being collected) as roots. We also - remove non-mutable objects from the mutable list at this point. - -------------------------------------------------------------------------- */ + case FETCH_ME_BQ: + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + IF_DEBUG(gc, + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", + p, info_type((StgClosure *)p))); + break; + } +#endif -static void -scavenge_mut_once_list(generation *gen) -{ - const StgInfoTable *info; - StgMutClosure *p, *next, *new_list; + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - p = gen->mut_once_list; - new_list = END_MUT_LIST; - next = p->mut_link; + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - evac_gen = gen->no; - failed_to_evac = rtsFalse; + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); - info = get_itbl(p); - /* - if (info->type==RBH) - info = 
REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure - */ - switch(info->type) { - case IND_OLDGEN: case IND_OLDGEN_PERM: case IND_STATIC: - /* Try to pull the indirectee into this generation, so we can - * remove the indirection from the mutable list. - */ - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - + { + /* Careful here: a THUNK can be on the mutable list because + * it contains pointers to young gen objects. If such a thunk + * is updated, the IND_OLDGEN will be added to the mutable + * list again, and we'll scavenge it twice. evacuate() + * doesn't check whether the object has already been + * evacuated, so we perform that check here. + */ + StgClosure *q = ((StgInd *)p)->indirectee; + if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) { + break; + } + ((StgInd *)p)->indirectee = evacuate(q); + } + #if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) /* Debugging code to print out the size of the thing we just @@ -3198,237 +3562,53 @@ scavenge_mut_once_list(generation *gen) } else { size = gen->steps[0].scan - start; } - belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); + debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } #endif - - /* failed_to_evac might happen if we've got more than two - * generations, we're collecting only generation 0, the - * indirection resides in generation 2 and the indirectee is - * in generation 1. - */ - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = new_list; - new_list = p; - } else { - /* the mut_link field of an IND_STATIC is overloaded as the - * static link field too (it just so happens that we don't need - * both at the same time), so we need to NULL it out when - * removing this object from the mutable list because the static - * link fields are all assumed to be NULL before doing a major - * collection. - */ - p->mut_link = NULL; - } - continue; - - case MUT_CONS: - /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove - * it from the mutable list if possible by promoting whatever it - * points to. - */ - if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) { - /* didn't manage to promote everything, so put the - * MUT_CONS back on the list. - */ - p->mut_link = new_list; - new_list = p; - } - continue; + break; default: - // shouldn't have anything else on the mutables list - barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); - } - } + barf("scavenge_one: strange object %d", (int)(info->type)); + } - gen->mut_once_list = new_list; + no_luck = failed_to_evac; + failed_to_evac = rtsFalse; + return (no_luck); } +/* ----------------------------------------------------------------------------- + Scavenging mutable lists. + + We treat the mutable list of each generation > N (i.e. all the + generations older than the one being collected) as roots. We also + remove non-mutable objects from the mutable list at this point. 
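   Concretely, each gen->mut_list is now a chain of blocks whose
   payload is a packed array of closure addresses (bd->start up to
   bd->free), so re-recording an object is a pointer append.  A
   sketch of that recording step, assuming the block allocator's
   allocBlock() and the bdescr fields (start, free, link) that
   scavenge_mutable_list reads below:

       bd = gen->mut_list;
       if (bd->free >= bd->start + BLOCK_SIZE_W) {
           bdescr *new_bd = allocBlock();  // head block full: chain a new one
           new_bd->link = bd;
           gen->mut_list = new_bd;
           bd = new_bd;
       }
       *bd->free++ = (StgWord)p;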
+ -------------------------------------------------------------------------- */ static void scavenge_mutable_list(generation *gen) { - const StgInfoTable *info; - StgMutClosure *p, *next; - - p = gen->saved_mut_list; - next = p->mut_link; - - evac_gen = 0; - failed_to_evac = rtsFalse; - - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); - info = get_itbl(p); - /* - if (info->type==RBH) - info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure - */ - switch(info->type) { - - case MUT_ARR_PTRS: - // follow everything - p->mut_link = gen->mut_list; - gen->mut_list = p; - { - StgPtr end, q; - - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); - } - continue; - } - - // Happens if a MUT_ARR_PTRS in the old generation is frozen - case MUT_ARR_PTRS_FROZEN: - { - StgPtr end, q; - - evac_gen = gen->no; - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); - } - evac_gen = 0; - p->mut_link = NULL; - if (failed_to_evac) { - failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)p, gen); - } - continue; - } - - case MUT_VAR: - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - - case MVAR: - { - StgMVar *mvar = (StgMVar *)p; - (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head); - (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail); - (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - case TSO: - { - StgTSO *tso = (StgTSO *)p; - - scavengeTSO(tso); - - /* Don't take this TSO off the mutable list - it might still - * point to some younger objects (because we set evac_gen to 0 - * above). - */ - tso->mut_link = gen->mut_list; - gen->mut_list = (StgMutClosure *)tso; - continue; - } - - case BLACKHOLE_BQ: - { - StgBlockingQueue *bh = (StgBlockingQueue *)p; - (StgClosure *)bh->blocking_queue = - evacuate((StgClosure *)bh->blocking_queue); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - /* Happens if a BLACKHOLE_BQ in the old generation is updated: - */ - case IND_OLDGEN: - case IND_OLDGEN_PERM: - /* Try to pull the indirectee into this generation, so we can - * remove the indirection from the mutable list. - */ - evac_gen = gen->no; - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - evac_gen = 0; - - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = gen->mut_once_list; - gen->mut_once_list = p; - } else { - p->mut_link = NULL; - } - continue; - -#if defined(PAR) - // HWL: check whether all of these are necessary - - case RBH: // cf. BLACKHOLE_BQ - { - // nat size, ptrs, nonptrs, vhs; - // char str[80]; - // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); - StgRBH *rbh = (StgRBH *)p; - (StgClosure *)rbh->blocking_queue = - evacuate((StgClosure *)rbh->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)rbh); - } - // ToDo: use size of reverted closure here! 
- p += BLACKHOLE_sizeW(); - break; - } - - case BLOCKED_FETCH: - { - StgBlockedFetch *bf = (StgBlockedFetch *)p; - // follow the pointer to the node which is being demanded - (StgClosure *)bf->node = - evacuate((StgClosure *)bf->node); - // follow the link to the rest of the blocking queue - (StgClosure *)bf->link = - evacuate((StgClosure *)bf->link); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); - } - p += sizeofW(StgBlockedFetch); - break; - } + bdescr *bd; + StgPtr p, q; -#ifdef DIST - case REMOTE_REF: - barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type)); -#endif - case FETCH_ME: - p += sizeofW(StgFetchMe); - break; // nothing to do in this case + bd = gen->saved_mut_list; - case FETCH_ME_BQ: // cf. BLACKHOLE_BQ - { - StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; - (StgClosure *)fmbq->blocking_queue = - evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); + evac_gen = gen->no; + for (; bd != NULL; bd = bd->link) { + for (q = bd->start; q < bd->free; q++) { + p = (StgPtr)*q; + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); + if (scavenge_one(p)) { + /* didn't manage to promote everything, so put the + * object back on the list. + */ + recordMutableGen((StgClosure *)p,gen); + } } - p += sizeofW(StgFetchMeBlockingQueue); - break; - } -#endif - - default: - // shouldn't have anything else on the mutables list - barf("scavenge_mutable_list: strange object? %d", (int)(info->type)); } - } + + // free the old mut_list + freeChain(gen->saved_mut_list); + gen->saved_mut_list = NULL; } @@ -3457,8 +3637,8 @@ scavenge_static(void) /* Take this object *off* the static_objects list, * and put it on the scavenged_static_objects list. */ - static_objects = STATIC_LINK(info,p); - STATIC_LINK(info,p) = scavenged_static_objects; + static_objects = *STATIC_LINK(info,p); + *STATIC_LINK(info,p) = scavenged_static_objects; scavenged_static_objects = p; switch (info -> type) { @@ -3469,15 +3649,13 @@ scavenge_static(void) ind->indirectee = evacuate(ind->indirectee); /* might fail to evacuate it, in which case we have to pop it - * back on the mutable list (and take it off the - * scavenged_static list because the static link and mut link - * pointers are one and the same). + * back on the mutable list of the oldest generation. We + * leave it *on* the scavenged_static_objects list, though, + * in case we visit this object again. 
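	 * (failed_to_evac is evacuate()'s signal that the indirectee
	 * already lives in a generation younger than evac_gen and
	 * cannot be promoted; the mutable-list entry made below is
	 * then the only record of that old-to-young pointer.)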
*/ if (failed_to_evac) { failed_to_evac = rtsFalse; - scavenged_static_objects = IND_STATIC_LINK(p); - ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list; - oldest_gen->mut_once_list = (StgMutClosure *)ind; + recordMutableGen((StgClosure *)p,oldest_gen); } break; } @@ -3497,7 +3675,7 @@ scavenge_static(void) next = (P_)p->payload + info->layout.payload.ptrs; // evacuate the pointers for (q = (P_)p->payload; q < next; q++) { - (StgClosure *)*q = evacuate((StgClosure *)*q); + *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); } break; } @@ -3530,7 +3708,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) bitmap = large_bitmap->bitmap[b]; for (i = 0; i < size; ) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } i++; p++; @@ -3543,12 +3721,12 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) } } -static inline StgPtr +STATIC_INLINE StgPtr scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap) { while (size > 0) { if ((bitmap & 1) == 0) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } p++; bitmap = bitmap >> 1; @@ -3571,7 +3749,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) StgWord bitmap; nat size; - //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end)); + //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end)); /* * Each time around this loop, we are looking at a chunk of stack @@ -3590,6 +3768,9 @@ scavenge_stack(StgPtr p, StgPtr stack_end) continue; // small bitmap (< 32 entries, or 64 on a 64-bit machine) + case CATCH_STM_FRAME: + case CATCH_RETRY_FRAME: + case ATOMICALLY_FRAME: case STOP_FRAME: case CATCH_FRAME: case RET_SMALL: @@ -3602,7 +3783,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) p = scavenge_small_bitmap(p, size, bitmap); follow_srt: - scavenge_srt((StgClosure **)info->srt, info->i.srt_len); + scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap); continue; case RET_BCO: { @@ -3610,7 +3791,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) nat size; p++; - (StgClosure *)*p = evacuate((StgClosure *)*p); + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); bco = (StgBCO *)*p; p++; size = BCO_BITMAP_SIZE(bco); @@ -3625,9 +3806,9 @@ scavenge_stack(StgPtr p, StgPtr stack_end) { nat size; - size = info->i.layout.large_bitmap->size; + size = GET_LARGE_BITMAP(&info->i)->size; p++; - scavenge_large_bitmap(p, info->i.layout.large_bitmap, size); + scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size); p += size; // and don't forget to follow the SRT goto follow_srt; @@ -3643,17 +3824,17 @@ scavenge_stack(StgPtr p, StgPtr stack_end) dyn = ((StgRetDyn *)p)->liveness; // traverse the bitmap first - bitmap = GET_LIVENESS(dyn); + bitmap = RET_DYN_LIVENESS(dyn); p = (P_)&((StgRetDyn *)p)->payload[0]; - size = RET_DYN_SIZE; + size = RET_DYN_BITMAP_SIZE; p = scavenge_small_bitmap(p, size, bitmap); // skip over the non-ptr words - p += GET_NONPTRS(dyn); + p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE; // follow the ptr words - for (size = GET_PTRS(dyn); size > 0; size--) { - (StgClosure *)*p = evacuate((StgClosure *)*p); + for (size = RET_DYN_PTRS(dyn); size > 0; size--) { + *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); p++; } continue; @@ -3708,7 +3889,7 @@ scavenge_large(step *stp) p = bd->start; if (scavenge_one(p)) { - mkMutCons((StgClosure *)p, stp->gen); + recordMutableGen((StgClosure *)p, stp->gen); } } } @@ 
-3726,28 +3907,8 @@ zero_static_object_list(StgClosure* first_static) for (p = first_static; p != END_OF_STATIC_LIST; p = link) { info = get_itbl(p); - link = STATIC_LINK(info, p); - STATIC_LINK(info,p) = NULL; - } -} - -/* This function is only needed because we share the mutable link - * field with the static link field in an IND_STATIC, so we have to - * zero the mut_link field before doing a major GC, which needs the - * static link field. - * - * It doesn't do any harm to zero all the mutable link fields on the - * mutable list. - */ - -static void -zero_mutable_list( StgMutClosure *first ) -{ - StgMutClosure *next, *c; - - for (c = first; c != END_MUT_LIST; c = next) { - next = c->mut_link; - c->mut_link = NULL; + link = *STATIC_LINK(info, p); + *STATIC_LINK(info,p) = NULL; } } @@ -3760,14 +3921,14 @@ revertCAFs( void ) { StgIndStatic *c; - for (c = (StgIndStatic *)caf_list; c != NULL; + for (c = (StgIndStatic *)revertible_caf_list; c != NULL; c = (StgIndStatic *)c->static_link) { - c->header.info = c->saved_info; + SET_INFO(c, c->saved_info); c->saved_info = NULL; // could, but not necessary: c->static_link = NULL; } - caf_list = NULL; + revertible_caf_list = NULL; } void @@ -3780,6 +3941,11 @@ markCAFs( evac_fn evac ) { evac(&c->indirectee); } + for (c = (StgIndStatic *)revertible_caf_list; c != NULL; + c = (StgIndStatic *)c->static_link) + { + evac(&c->indirectee); + } } /* ----------------------------------------------------------------------------- @@ -3815,7 +3981,7 @@ gcCAFs(void) ASSERT(info->type == IND_STATIC); if (STATIC_LINK(info,p) == NULL) { - IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p)); + IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p)); // black hole it SET_INFO(p,&stg_BLACKHOLE_info); p = STATIC_LINK2(info,p); @@ -3829,7 +3995,7 @@ gcCAFs(void) } - // belch("%d CAFs live", i); + // debugBelch("%d CAFs live", i); } #endif @@ -3847,7 +4013,7 @@ threadLazyBlackHole(StgTSO *tso) { StgClosure *frame; StgRetInfoTable *info; - StgBlockingQueue *bh; + StgClosure *bh; StgPtr stack_end; stack_end = &tso->stack[tso->stack_size]; @@ -3860,7 +4026,7 @@ threadLazyBlackHole(StgTSO *tso) switch (info->i.type) { case UPDATE_FRAME: - bh = (StgBlockingQueue *)((StgUpdateFrame *)frame)->updatee; + bh = ((StgUpdateFrame *)frame)->updatee; /* if the thunk is already blackholed, it means we've also * already blackholed the rest of the thunks on this stack, @@ -3873,10 +4039,9 @@ threadLazyBlackHole(StgTSO *tso) return; } - if (bh->header.info != &stg_BLACKHOLE_BQ_info && - bh->header.info != &stg_CAF_BLACKHOLE_info) { + if (bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - belch("Unexpected lazy BHing required at 0x%04x",(int)bh); + debugBelch("Unexpected lazy BHing required at 0x%04x\n",(int)bh); #endif #ifdef PROFILING // @LDV profiling @@ -3884,11 +4049,9 @@ threadLazyBlackHole(StgTSO *tso) LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); #endif SET_INFO(bh,&stg_BLACKHOLE_info); -#ifdef PROFILING - // @LDV profiling + // We pretend that bh has just been created. - LDV_recordCreate(bh); -#endif + LDV_RECORD_CREATE(bh); } frame = (StgClosure *) ((StgUpdateFrame *)frame + 1); @@ -3899,7 +4062,7 @@ threadLazyBlackHole(StgTSO *tso) // normal stack frames; do nothing except advance the pointer default: - (StgPtr)frame += stack_frame_sizeW(frame); + frame = (StgClosure *)((StgPtr)frame + stack_frame_sizeW(frame)); } } } @@ -3980,7 +4143,6 @@ threadSqueezeStack(StgTSO *tso) * screw us up if we don't check. 
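	     * When the two updatees differ, the loser is overwritten
	     * with an indirection to the one we keep, so both frames'
	     * promises resolve to the same result.  Roughly what
	     * UPD_IND_NOLOCK below expands to, ignoring profiling and
	     * the generational write barrier:
	     *
	     *     ((StgInd *)upd->updatee)->indirectee = updatee;
	     *     SET_INFO(upd->updatee, &stg_IND_info);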
*/ if (upd->updatee != updatee && !closure_IND(upd->updatee)) { - // this wakes the threads up UPD_IND_NOLOCK(upd->updatee, updatee); } @@ -3998,14 +4160,13 @@ threadSqueezeStack(StgTSO *tso) // single update frame, or the topmost update frame in a series else { - StgBlockingQueue *bh = (StgBlockingQueue *)upd->updatee; + StgClosure *bh = upd->updatee; // Do lazy black-holing if (bh->header.info != &stg_BLACKHOLE_info && - bh->header.info != &stg_BLACKHOLE_BQ_info && bh->header.info != &stg_CAF_BLACKHOLE_info) { #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG) - belch("Unexpected lazy BHing required at 0x%04x",(int)bh); + debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh); #endif #ifdef DEBUG /* zero out the slop so that the sanity checker can tell @@ -4021,8 +4182,8 @@ threadSqueezeStack(StgTSO *tso) * same size as a BLACKHOLE in any case. */ if (bh_info->type != THUNK_SELECTOR) { - for (i = np; i < np + nw; i++) { - ((StgClosure *)bh)->payload[i] = 0; + for (i = 0; i < np + nw; i++) { + ((StgClosure *)bh)->payload[i] = INVALID_OBJECT; } } } @@ -4031,12 +4192,11 @@ threadSqueezeStack(StgTSO *tso) // We pretend that bh is now dead. LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh); #endif - // Todo: maybe use SET_HDR() and remove LDV_recordCreate()? + // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? SET_INFO(bh,&stg_BLACKHOLE_info); -#ifdef PROFILING + // We pretend that bh has just been created. - LDV_recordCreate(bh); -#endif + LDV_RECORD_CREATE(bh); } prev_was_update_frame = rtsTrue; @@ -4091,19 +4251,19 @@ done_traversing: void *gap_start, *next_gap_start, *gap_end; nat chunk_size; - next_gap_start = (void *)gap + sizeof(StgUpdateFrame); + next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame)); sp = next_gap_start; while ((StgPtr)gap > tso->sp) { // we're working in *bytes* now... 
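	    /* Each gap occupies [gap_end, gap_start) and the live
	     * chunk below it occupies [next_gap_start, gap_end); the
	     * memmove below slides that chunk up over the dead words.
	     * The unsigned char casts keep this arithmetic in bytes,
	     * converting gap_size (in words) via sizeof(W_).
	     */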
gap_start = next_gap_start; - gap_end = gap_start - gap->gap_size * sizeof(W_); + gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_)); gap = gap->next_gap; - next_gap_start = (void *)gap + sizeof(StgUpdateFrame); + next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame)); - chunk_size = gap_end - next_gap_start; + chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start; sp -= chunk_size; memmove(sp, next_gap_start, chunk_size); } @@ -4134,49 +4294,18 @@ threadPaused(StgTSO *tso) #if DEBUG void -printMutOnceList(generation *gen) -{ - StgMutClosure *p, *next; - - p = gen->mut_once_list; - next = p->mut_link; - - fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list); - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - fprintf(stderr, "%p (%s), ", - p, info_type((StgClosure *)p)); - } - fputc('\n', stderr); -} - -void printMutableList(generation *gen) { - StgMutClosure *p, *next; - - p = gen->mut_list; - next = p->mut_link; + bdescr *bd; + StgPtr p; - fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list); - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - fprintf(stderr, "%p (%s), ", - p, info_type((StgClosure *)p)); - } - fputc('\n', stderr); -} + debugBelch("@@ Mutable list %p: ", gen->mut_list); -static inline rtsBool -maybeLarge(StgClosure *closure) -{ - StgInfoTable *info = get_itbl(closure); - - /* closure types that may be found on the new_large_objects list; - see scavenge_large */ - return (info->type == MUT_ARR_PTRS || - info->type == MUT_ARR_PTRS_FROZEN || - info->type == TSO || - info->type == ARR_WORDS); + for (bd = gen->mut_list; bd != NULL; bd = bd->link) { + for (p = bd->start; p < bd->free; p++) { + debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p)); + } + } + debugBelch("\n"); } - - -#endif // DEBUG +#endif /* DEBUG */
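The printMutableList/scavenge_mutable_list pair above fixes the shape of
the new mutable lists: a chain of block descriptors, each holding a dense
array of closure addresses between bd->start and bd->free.  A minimal,
self-contained model of that structure, using hypothetical names (Block,
new_block, record_mutable, BLOCK_WORDS) rather than the RTS's
bdescr/allocBlock, for illustration only:

    #include <stdio.h>
    #include <stdlib.h>

    #define BLOCK_WORDS 4                   /* tiny on purpose, for illustration */

    typedef struct Block_ {
        void **free;                        /* next free slot in this block */
        void  *slots[BLOCK_WORDS];
        struct Block_ *link;                /* older blocks in the chain */
    } Block;

    static Block *new_block(Block *link)
    {
        Block *b = malloc(sizeof *b);
        if (!b) abort();
        b->free = b->slots;
        b->link = link;
        return b;
    }

    /* Append p, starting a fresh head block when the current one is
     * full -- the same move the patch makes with allocBlock(). */
    static Block *record_mutable(Block *head, void *p)
    {
        if (head->free == head->slots + BLOCK_WORDS)
            head = new_block(head);
        *head->free++ = p;
        return head;
    }

    int main(void)
    {
        Block *mut_list = new_block(NULL);  /* model invariant: at least one block */
        int objs[10];

        for (int i = 0; i < 10; i++)
            mut_list = record_mutable(mut_list, &objs[i]);

        /* Traverse exactly as printMutableList does: every slot between
         * a block's payload start and its free pointer names one closure. */
        for (Block *b = mut_list; b != NULL; b = b->link)
            for (void **q = b->slots; q < b->free; q++)
                printf("%p ", *q);
        putchar('\n');

        /* Releasing the whole chain mirrors freeChain(gen->saved_mut_list). */
        while (mut_list) {
            Block *next = mut_list->link;
            free(mut_list);
            mut_list = next;
        }
        return 0;
    }

Appending is O(1), traversal touches only live entries, and discarding a
generation's list is a single chain free, which is what makes the
freeChain()-then-allocBlock() reset in the collector cheap.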