X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=includes%2FStorage.h;h=e541082ee6fd0eacfdc94296e6030e4a52c34c00;hb=118b39e4db1a8187af52c1ddb1b299021834a5a2;hp=d431298af94689ea218dfe5e83bdf0a71507740c;hpb=c3062251034f54944061e816ed018b0b2db1b849;p=ghc-hetmet.git

diff --git a/includes/Storage.h b/includes/Storage.h
index d431298..e541082 100644
--- a/includes/Storage.h
+++ b/includes/Storage.h
@@ -81,7 +81,6 @@ typedef struct step_ {
 #if defined(THREADED_RTS)
     char pad[128];                  // make sure the following is
                                     // on a separate cache line.
-    SpinLock sync_todo;             // lock for todos
     SpinLock sync_large_objects;    // lock for large_objects
                                     // and scavenged_large_objects
 #endif
@@ -93,10 +92,6 @@ typedef struct step_ {
     unsigned int n_old_blocks;      // number of blocks in from-space
     unsigned int live_estimate;     // for sweeping: estimate of live data
 
-    bdescr *     todos;             // blocks waiting to be scavenged
-    bdescr *     todos_last;
-    unsigned int n_todos;           // count of above
-
     bdescr *     part_blocks;       // partially-full scanned blocks
     unsigned int n_part_blocks;     // count of above
 
@@ -220,7 +215,7 @@ extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);
 
    -------------------------------------------------------------------------- */
 
-extern void GarbageCollect(rtsBool force_major_gc);
+extern void GarbageCollect(rtsBool force_major_gc, nat gc_type, Capability *cap);
 
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
@@ -245,7 +240,6 @@ extern void GarbageCollect(rtsBool force_major_gc);
 #if defined(THREADED_RTS)
 extern Mutex sm_mutex;
 extern Mutex atomic_modify_mutvar_mutex;
-extern SpinLock recordMutableGen_sync;
 #endif
 
 #if defined(THREADED_RTS)
@@ -258,63 +252,40 @@ extern SpinLock recordMutableGen_sync;
 #define ASSERT_SM_LOCK()
 #endif
 
+#if !IN_STG_CODE
+
 INLINE_HEADER void
-recordMutableGen(StgClosure *p, generation *gen)
+recordMutableGen(StgClosure *p, nat gen_no)
 {
     bdescr *bd;
 
-    bd = gen->mut_list;
+    bd = generations[gen_no].mut_list;
     if (bd->free >= bd->start + BLOCK_SIZE_W) {
         bdescr *new_bd;
         new_bd = allocBlock();
         new_bd->link = bd;
         bd = new_bd;
-        gen->mut_list = bd;
+        generations[gen_no].mut_list = bd;
     }
     *bd->free++ = (StgWord)p;
 }
 
 INLINE_HEADER void
-recordMutableGenLock(StgClosure *p, generation *gen)
+recordMutableGenLock(StgClosure *p, nat gen_no)
 {
     ACQUIRE_SM_LOCK;
-    recordMutableGen(p,gen);
+    recordMutableGen(p,gen_no);
     RELEASE_SM_LOCK;
 }
 
-extern bdescr *allocBlock_sync(void);
-
-// Version of recordMutableGen() for use in parallel GC. The same as
-// recordMutableGen(), except that we surround it with a spinlock and
-// call the spinlock version of allocBlock().
-
-INLINE_HEADER void
-recordMutableGen_GC(StgClosure *p, generation *gen)
-{
-    bdescr *bd;
-
-    ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
-
-    bd = gen->mut_list;
-    if (bd->free >= bd->start + BLOCK_SIZE_W) {
-        bdescr *new_bd;
-        new_bd = allocBlock_sync();
-        new_bd->link = bd;
-        bd = new_bd;
-        gen->mut_list = bd;
-    }
-    *bd->free++ = (StgWord)p;
-
-    RELEASE_SPIN_LOCK(&recordMutableGen_sync);
-}
-
 INLINE_HEADER void
 recordMutable(StgClosure *p)
 {
     bdescr *bd;
 
     ASSERT(closure_MUTABLE(p));
 
     bd = Bdescr((P_)p);
-    if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
+    if (bd->gen_no > 0) recordMutableGen(p, bd->gen_no);
 }
 
 INLINE_HEADER void
@@ -325,6 +296,8 @@ recordMutableLock(StgClosure *p)
     RELEASE_SM_LOCK;
 }
 
+#endif // !IN_STG_CODE
+
 /* -----------------------------------------------------------------------------
    The CAF table - used to let us revert CAFs in GHCi
    -------------------------------------------------------------------------- */
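
Note on the API change in the patch above: recordMutableGen() and recordMutableGenLock() now take a generation number (nat gen_no) and index the global generations[] table themselves instead of receiving a generation pointer, and the spinlocked recordMutableGen_GC() variant is dropped from this header. The standalone sketch below mirrors that append-to-mut_list pattern outside the RTS so the data flow is easier to follow; Block, Gen, BLOCK_WORDS, alloc_block() and record_mutable() are simplified stand-ins invented for illustration, not the real bdescr, generation, BLOCK_SIZE_W and allocBlock() definitions.

/* Illustrative sketch only -- simplified stand-ins, not RTS code. */
#include <stdlib.h>
#include <stdint.h>

#define BLOCK_WORDS 1024                 /* capacity of one block, in words */

typedef struct Block_ {
    uintptr_t     *start;                /* first slot in the block        */
    uintptr_t     *free;                 /* next unused slot               */
    struct Block_ *link;                 /* previously filled block        */
} Block;

typedef struct {
    Block *mut_list;                     /* chain of mutable-pointer blocks */
} Gen;

static Gen generations[2];               /* stand-in for the generation table */

static Block *alloc_block(void)          /* stand-in for allocBlock() */
{
    Block *b = malloc(sizeof *b);
    b->start = malloc(BLOCK_WORDS * sizeof *b->start);
    b->free  = b->start;
    b->link  = NULL;
    return b;
}

/* Same shape as the patched recordMutableGen(): look the generation up by
 * number, append the pointer to the current mut_list block, and chain a
 * fresh block in front when the current one is full (or absent). */
static void record_mutable(void *p, unsigned gen_no)
{
    Block *bd = generations[gen_no].mut_list;
    if (bd == NULL || bd->free >= bd->start + BLOCK_WORDS) {
        Block *new_bd = alloc_block();
        new_bd->link = bd;
        bd = new_bd;
        generations[gen_no].mut_list = bd;
    }
    *bd->free++ = (uintptr_t)p;
}

int main(void)
{
    static int boxed_value;
    record_mutable(&boxed_value, 1);     /* remember the object in generation 1 */
    return 0;
}

Callers change accordingly: as the recordMutable() hunk shows, the call site goes from passing a pointer into the generations table to passing bd->gen_no directly.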