X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.h;h=79fee9da56869d4823a194920c8e79fe459a568b;hb=70541676b8c2de0bcecf42eb2edcb2a11f53f83e;hp=fc93e01596299ac6dda96d62675d9c6950f638df;hpb=7f309f1c021e7583f724cce599ce2dd3c439361b;p=ghc-hetmet.git diff --git a/ghc/rts/Storage.h b/ghc/rts/Storage.h index fc93e01..79fee9d 100644 --- a/ghc/rts/Storage.h +++ b/ghc/rts/Storage.h @@ -1,7 +1,7 @@ /* ----------------------------------------------------------------------------- - * $Id: Storage.h,v 1.7 1999/02/05 16:03:01 simonm Exp $ + * $Id: Storage.h,v 1.51 2003/03/27 13:54:32 simonmar Exp $ * - * (c) The GHC Team, 1998-1999 + * (c) The GHC Team, 1998-2002 * * External Storage Manger Interface * @@ -11,8 +11,12 @@ #define STORAGE_H #include "Block.h" +#include "MBlock.h" #include "BlockAlloc.h" #include "StoragePriv.h" +#ifdef PROFILING +#include "LdvProfile.h" +#endif /* ----------------------------------------------------------------------------- Initialisation / De-initialisation @@ -24,12 +28,24 @@ extern void exitStorage(void); /* ----------------------------------------------------------------------------- Generic allocation - StgPtr allocate(int n) Allocates a chunk of contiguous store + StgPtr allocate(nat n) Allocates a chunk of contiguous store n words long, returning a pointer to the first word. Always succeeds. + StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store + n words long, which is at a fixed + address (won't be moved by GC). + Returns a pointer to the first word. + Always succeeds. + + NOTE: the GC can't in general handle + pinned objects, so allocatePinned() + can only be used for ByteArrays at the + moment. + Don't forget to TICK_ALLOC_XXX(...) - after calling allocate, for the + after calling allocate or + allocatePinned, for the benefit of the ticky-ticky profiler. rtsBool doYouWantToGC(void) Returns True if the storage manager is @@ -37,15 +53,21 @@ extern void exitStorage(void); lnat allocated_bytes(void) Returns the number of bytes allocated via allocate() since the last GC. - Used in the reoprting of statistics. + Used in the reporting of statistics. + + SMP: allocate and doYouWantToGC can be used from STG code, they are + surrounded by a mutex. -------------------------------------------------------------------------- */ -extern StgPtr allocate(nat n); -static inline rtsBool doYouWantToGC(void) +extern StgPtr allocate ( nat n ); +extern StgPtr allocatePinned ( nat n ); +extern lnat allocated_bytes ( void ); + +static inline rtsBool +doYouWantToGC( void ) { return (alloc_blocks >= alloc_blocks_lim); } -extern lnat allocated_bytes(void); /* ----------------------------------------------------------------------------- ExtendNursery(hp,hplim) When hplim is reached, try to grab @@ -56,14 +78,12 @@ extern lnat allocated_bytes(void); -------------------------------------------------------------------------- */ #define ExtendNursery(hp,hplim) \ - (current_nursery->free = (P_)(hp)+1, \ - current_nursery->link == NULL ? rtsFalse : \ - (current_nursery = current_nursery->link, \ + (CloseNursery(hp), \ + CurrentNursery->link == NULL ? rtsFalse : \ + (CurrentNursery = CurrentNursery->link, \ OpenNursery(hp,hplim), \ rtsTrue)) -extern void PleaseStopAllocating(void); - /* ----------------------------------------------------------------------------- Performing Garbage Collection @@ -75,35 +95,55 @@ extern void PleaseStopAllocating(void); MarkRoot(StgClosure *p) Returns the new location of the root. 
-------------------------------------------------------------------------- */ -extern void GarbageCollect(void (*get_roots)(void)); -extern StgClosure *MarkRoot(StgClosure *p); +extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc); /* ----------------------------------------------------------------------------- Generational garbage collection support - RecordMutable(StgPtr p) Informs the garbage collector that a + recordMutable(StgPtr p) Informs the garbage collector that a previously immutable object has become (permanently) mutable. Used by thawArray and similar. - UpdateWithIndirection(p1,p2) Updates the object at p1 with an + updateWithIndirection(p1,p2) Updates the object at p1 with an indirection pointing to p2. This is normally called for objects in an old generation (>0) when they are updated. + updateWithPermIndirection(p1,p2) As above but uses a permanent indir. + -------------------------------------------------------------------------- */ +/* + * Storage manager mutex + */ +#if defined(SMP) +extern Mutex sm_mutex; +#define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex) +#define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex) +#else +#define ACQUIRE_SM_LOCK +#define RELEASE_SM_LOCK +#endif + +/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some + * kind of lock in the SMP case? + */ static inline void recordMutable(StgMutClosure *p) { bdescr *bd; +#ifdef SMP + ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p)); +#else ASSERT(closure_MUTABLE(p)); +#endif bd = Bdescr((P_)p); - if (bd->gen->no > 0) { - p->mut_link = bd->gen->mut_list; - bd->gen->mut_list = p; + if (bd->gen_no > 0) { + p->mut_link = generations[bd->gen_no].mut_list; + generations[bd->gen_no].mut_list = p; } } @@ -113,37 +153,298 @@ recordOldToNewPtrs(StgMutClosure *p) bdescr *bd; bd = Bdescr((P_)p); - if (bd->gen->no > 0) { - p->mut_link = bd->gen->mut_once_list; - bd->gen->mut_once_list = p; + if (bd->gen_no > 0) { + p->mut_link = generations[bd->gen_no].mut_once_list; + generations[bd->gen_no].mut_once_list = p; } } +// @LDV profiling +// We zero out the slop when PROFILING is on. +// #ifndef DEBUG +#if !defined(DEBUG) && !defined(PROFILING) +#define updateWithIndirection(info, ind_info, p1, p2, and_then) \ + { \ + bdescr *bd; \ + \ + bd = Bdescr((P_)p1); \ + if (bd->gen_no == 0) { \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO(p1,ind_info); \ + TICK_UPD_NEW_IND(); \ + and_then; \ + } else { \ + ((StgIndOldGen *)p1)->indirectee = p2; \ + if (info != &stg_BLACKHOLE_BQ_info) { \ + ACQUIRE_SM_LOCK; \ + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + } \ + SET_INFO(p1,&stg_IND_OLDGEN_info); \ + TICK_UPD_OLD_IND(); \ + and_then; \ + } \ + } +#elif defined(PROFILING) +// @LDV profiling +// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in +// which p1 resides. +// +// Note: +// After all, we do *NOT* need to call LDV_recordCreate() for both IND and +// IND_OLDGEN closures because they are inherently used. But, it corrupts +// the invariants that every closure keeps its creation time in the profiling +// field. So, we call LDV_recordCreate(). 
+ +#define updateWithIndirection(info, ind_info, p1, p2, and_then) \ + { \ + bdescr *bd; \ + \ + LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \ + bd = Bdescr((P_)p1); \ + if (bd->gen_no == 0) { \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO(p1,ind_info); \ + LDV_recordCreate((p1)); \ + TICK_UPD_NEW_IND(); \ + and_then; \ + } else { \ + ((StgIndOldGen *)p1)->indirectee = p2; \ + if (info != &stg_BLACKHOLE_BQ_info) { \ + ACQUIRE_SM_LOCK; \ + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + } \ + SET_INFO(p1,&stg_IND_OLDGEN_info); \ + LDV_recordCreate((p1)); \ + and_then; \ + } \ + } + +#else + +/* In the DEBUG case, we also zero out the slop of the old closure, + * so that the sanity checker can tell where the next closure is. + * + * Two important invariants: we should never try to update a closure + * to point to itself, and the closure being updated should not + * already have been updated (the mutable list will get messed up + * otherwise). + */ +#define updateWithIndirection(info, ind_info, p1, p2, and_then) \ + { \ + bdescr *bd; \ + \ + ASSERT( p1 != p2 && !closure_IND(p1) ); \ + bd = Bdescr((P_)p1); \ + if (bd->gen_no == 0) { \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO(p1,ind_info); \ + TICK_UPD_NEW_IND(); \ + and_then; \ + } else { \ + if (info != &stg_BLACKHOLE_BQ_info) { \ + { \ + StgInfoTable *inf = get_itbl(p1); \ + nat np = inf->layout.payload.ptrs, \ + nw = inf->layout.payload.nptrs, i; \ + if (inf->type != THUNK_SELECTOR) { \ + for (i = 0; i < np + nw; i++) { \ + ((StgClosure *)p1)->payload[i] = 0; \ + } \ + } \ + } \ + ACQUIRE_SM_LOCK; \ + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + } \ + ((StgIndOldGen *)p1)->indirectee = p2; \ + SET_INFO(p1,&stg_IND_OLDGEN_info); \ + TICK_UPD_OLD_IND(); \ + and_then; \ + } \ + } +#endif + +/* Static objects all live in the oldest generation + */ +#define updateWithStaticIndirection(info, p1, p2) \ + { \ + ASSERT( p1 != p2 && !closure_IND(p1) ); \ + ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \ + \ + ACQUIRE_SM_LOCK; \ + ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \ + oldest_gen->mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \ + TICK_UPD_STATIC_IND(); \ + } + +#if defined(TICKY_TICKY) || defined(PROFILING) static inline void -updateWithIndirection(StgClosure *p1, StgClosure *p2) +updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2) { bdescr *bd; + ASSERT( p1 != p2 && !closure_IND(p1) ); + +#ifdef PROFILING + // @LDV profiling + // Destroy the old closure. + // Nb: LDV_* stuff cannot mix with ticky-ticky + LDV_recordDead_FILL_SLOP_DYNAMIC(p1); +#endif bd = Bdescr((P_)p1); - if (bd->gen->no == 0) { - SET_INFO(p1,&IND_info); + if (bd->gen_no == 0) { ((StgInd *)p1)->indirectee = p2; - TICK_UPD_NEW_IND(); + SET_INFO(p1,&stg_IND_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We have just created a new closure. 
+ LDV_recordCreate(p1); +#endif + TICK_UPD_NEW_PERM_IND(p1); } else { - SET_INFO(p1,&IND_OLDGEN_info); ((StgIndOldGen *)p1)->indirectee = p2; - ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; - bd->gen->mut_once_list = (StgMutClosure *)p1; - TICK_UPD_OLD_IND(); + if (info != &stg_BLACKHOLE_BQ_info) { + ACQUIRE_SM_LOCK; + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; + RELEASE_SM_LOCK; + } + SET_INFO(p1,&stg_IND_OLDGEN_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We have just created a new closure. + LDV_recordCreate(p1); +#endif + TICK_UPD_OLD_PERM_IND(); } } +#endif + +/* ----------------------------------------------------------------------------- + The CAF table - used to let us revert CAFs in GHCi + -------------------------------------------------------------------------- */ + +void revertCAFs( void ); /* ----------------------------------------------------------------------------- - The CAF list - used to let us revert CAFs + DEBUGGING predicates for pointers + + LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr + LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr + These macros are complete but not sound. That is, they might + return false positives. Do not rely on them to distinguish info + pointers from closure pointers, for example. + + We don't use address-space predicates these days, for portability + reasons, and the fact that code/data can be scattered about the + address space in a dynamically-linked environment. Our best option + is to look at the alleged info table and see whether it seems to + make sense... -------------------------------------------------------------------------- */ -extern StgCAF* enteredCAFs; +#define LOOKS_LIKE_INFO_PTR(p) \ + (p && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \ + ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES) + +#define LOOKS_LIKE_CLOSURE_PTR(p) \ + (LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info)) + +/* ----------------------------------------------------------------------------- + Macros for calculating how big a closure will be (used during allocation) + -------------------------------------------------------------------------- */ + +static __inline__ StgOffset PAP_sizeW ( nat n_args ) +{ return sizeofW(StgPAP) + n_args; } + +static __inline__ StgOffset AP_STACK_sizeW ( nat size ) +{ return sizeofW(StgAP_STACK) + size; } + +static __inline__ StgOffset CONSTR_sizeW( nat p, nat np ) +{ return sizeofW(StgHeader) + p + np; } + +static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void ) +{ return sizeofW(StgHeader) + MIN_UPD_SIZE; } + +static __inline__ StgOffset BLACKHOLE_sizeW ( void ) +{ return sizeofW(StgHeader) + MIN_UPD_SIZE; } + +/* -------------------------------------------------------------------------- + Sizes of closures + ------------------------------------------------------------------------*/ + +static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl ) +{ return sizeofW(StgClosure) + + sizeofW(StgPtr) * itbl->layout.payload.ptrs + + sizeofW(StgWord) * itbl->layout.payload.nptrs; } + +static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x ) +{ return AP_STACK_sizeW(x->size); } + +static __inline__ StgOffset pap_sizeW( StgPAP* x ) +{ return PAP_sizeW(x->n_args); } + +static __inline__ StgOffset arr_words_sizeW( StgArrWords* x ) +{ return sizeofW(StgArrWords) + x->words; } + +static __inline__ StgOffset 
mut_arr_ptrs_sizeW( StgMutArrPtrs* x ) +{ return sizeofW(StgMutArrPtrs) + x->ptrs; } + +static __inline__ StgWord tso_sizeW ( StgTSO *tso ) +{ return TSO_STRUCT_SIZEW + tso->stack_size; } + +static __inline__ StgWord bco_sizeW ( StgBCO *bco ) +{ return bco->size; } + +/* ----------------------------------------------------------------------------- + Sizes of stack frames + -------------------------------------------------------------------------- */ + +static inline StgWord stack_frame_sizeW( StgClosure *frame ) +{ + StgRetInfoTable *info; + + info = get_ret_itbl(frame); + switch (info->i.type) { + + case RET_DYN: + { + StgRetDyn *dyn = (StgRetDyn *)frame; + return sizeofW(StgRetDyn) + RET_DYN_SIZE + + GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness); + } + + case RET_FUN: + return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size; + + case RET_BIG: + case RET_VEC_BIG: + return 1 + info->i.layout.large_bitmap->size; + + case RET_BCO: + return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]); + + default: + return 1 + BITMAP_SIZE(info->i.layout.bitmap); + } +} + +/* ----------------------------------------------------------------------------- + Debugging bits + -------------------------------------------------------------------------- */ -#endif STORAGE_H +#if defined(DEBUG) +void printMutOnceList(generation *gen); +void printMutableList(generation *gen); +#endif +#endif // STORAGE_H
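
The allocation interface documented above (allocate, allocatePinned, doYouWantToGC, allocated_bytes) is easiest to see in a small call-site sketch. The following is illustrative only and not part of the patch: exampleAllocate is a hypothetical wrapper, and the ticky-ticky call is left as a placeholder for the TICK_ALLOC_XXX family the comment mentions.

/* Hypothetical wrapper, for illustration only: allocate an ordinary
 * (movable) object of n_words words and report whether the storage
 * manager would like a GC at the next safe point.
 */
static StgPtr
exampleAllocate( nat n_words, rtsBool *gc_wanted )
{
    StgPtr p;

    p = allocate(n_words);            /* always succeeds */
    /* TICK_ALLOC_XXX(...) would go here, as the comment above requires */

    *gc_wanted = doYouWantToGC();     /* caller should schedule a GC if true */
    return p;
}

/* allocatePinned(n) has the same shape, but per the NOTE above it may
 * only be used for ByteArrays, because the GC cannot handle arbitrary
 * pinned objects.
 */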
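
Similarly, a hedged sketch of a call site for the five-argument updateWithIndirection macro defined above. The real call sites live in Updates.h and differ in detail (blackhole locking, blocking-queue wake-up); exampleUpdate is a hypothetical name, get_itbl and stg_IND_info are existing RTS symbols, and (void)0 stands in for the and_then continuation.

/* Hypothetical call site, for illustration only.  The first argument is
 * the updatee's current info pointer (the macro compares it against
 * stg_BLACKHOLE_BQ_info to decide whether an old-generation updatee must
 * be added to its generation's mut_once_list); the second supplies the
 * info table to install in the new-generation case.
 */
static void
exampleUpdate( StgClosure *updatee, StgClosure *value )
{
    updateWithIndirection(get_itbl(updatee), &stg_IND_info,
                          updatee, value, (void)0);
}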
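
Finally, an illustrative use of the sizing helpers and the LOOKS_LIKE_CLOSURE_PTR predicate introduced above: stepping over one object during a linear scan of a heap block. Real scanning code (the GC, the sanity checker) handles many more closure types; exampleScanStep is hypothetical and only shows how the helpers compose.

/* Hypothetical helper, for illustration only: given a pointer to the
 * start of a closure, return a pointer just past it.  Only a few
 * representative closure types are shown.
 */
static StgPtr
exampleScanStep( StgPtr p )
{
    const StgInfoTable *info;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case ARR_WORDS:
        return p + arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
        return p + mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    case TSO:
        return p + tso_sizeW((StgTSO *)p);
    default:
        /* plain pointers-first closures: size comes from the info table */
        return p + sizeW_fromITBL(info);
    }
}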