X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.h;h=f746d1f0ca5f6bf4235c9ef01798839c4913cc81;hb=db61851c5472bf565cd1da900b33d6e033fd743d;hp=e32e9ada7c7636175de9cf7cf45fa675f873e013;hpb=23b442106a77d4a2f5c8c951b43feabab63160a0;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.h b/ghc/rts/Storage.h
index e32e9ad..f746d1f 100644
--- a/ghc/rts/Storage.h
+++ b/ghc/rts/Storage.h
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.24 2001/01/26 14:36:40 simonpj Exp $
+ * $Id: Storage.h,v 1.37 2001/11/22 14:25:12 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -13,6 +13,9 @@
 #include "Block.h"
 #include "BlockAlloc.h"
 #include "StoragePriv.h"
+#ifdef PROFILING
+#include "LdvProfile.h"
+#endif
 
 /* -----------------------------------------------------------------------------
    Initialisation / De-initialisation
@@ -24,12 +27,24 @@ extern void exitStorage(void);
 
 /* -----------------------------------------------------------------------------
    Generic allocation
-   StgPtr allocate(int n)       Allocates a chunk of contiguous store
+   StgPtr allocate(nat n)       Allocates a chunk of contiguous store
                                 n words long, returning a pointer to
                                 the first word.  Always succeeds.
 
+   StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
+                                n words long, which is at a fixed
+                                address (won't be moved by GC).
+                                Returns a pointer to the first word.
+                                Always succeeds.
+
+                                NOTE: the GC can't in general handle
+                                pinned objects, so allocatePinned()
+                                can only be used for ByteArrays at the
+                                moment.
+
    Don't forget to TICK_ALLOC_XXX(...)
-   after calling allocate, for the
+   after calling allocate or
+   allocatePinned, for the
    benefit of the ticky-ticky profiler.
 
   rtsBool doYouWantToGC(void)  Returns True if the storage manager is
@@ -43,12 +58,15 @@ extern void exitStorage(void);
    surrounded by a mutex.
    -------------------------------------------------------------------------- */
 
-extern StgPtr allocate(nat n);
-static inline rtsBool doYouWantToGC(void)
+extern StgPtr allocate ( nat n );
+extern StgPtr allocatePinned ( nat n );
+extern lnat allocated_bytes ( void );
+
+static inline rtsBool
+doYouWantToGC( void )
 {
   return (alloc_blocks >= alloc_blocks_lim);
 }
-extern lnat allocated_bytes(void);
 
 /* -----------------------------------------------------------------------------
    ExtendNursery(hp,hplim)   When hplim is reached, try to grab
@@ -78,13 +96,7 @@ extern void PleaseStopAllocating(void);
    MarkRoot(StgClosure *p)      Returns the new location of the root.
    -------------------------------------------------------------------------- */
 
-extern void GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc);
-extern StgClosure *MarkRoot(StgClosure *p);
-
-/* Temporary measure to ensure we retain all the dynamically-loaded CAFs */
-#ifdef GHCI
-extern void markCafs( void );
-#endif
+extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
 
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
@@ -103,6 +115,13 @@
 
    -------------------------------------------------------------------------- */
 
+/*
+ * Storage manager mutex
+ */
+#ifdef SMP
+extern pthread_mutex_t sm_mutex;
+#endif
+
 /* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
  * kind of lock in the SMP case?
  */
@@ -118,9 +137,9 @@ recordMutable(StgMutClosure *p)
 #endif
 
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_list;
-    bd->gen->mut_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_list;
+    generations[bd->gen_no].mut_list = p;
   }
 }
 
@@ -130,19 +149,22 @@ recordOldToNewPtrs(StgMutClosure *p)
   bdescr *bd;
 
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_once_list;
-    bd->gen->mut_once_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_once_list;
+    generations[bd->gen_no].mut_once_list = p;
   }
 }
 
-#ifndef DEBUG
+// @LDV profiling
+// We zero out the slop when PROFILING is on.
+// #ifndef DEBUG
+#if !defined(DEBUG) && !defined(PROFILING)
 #define updateWithIndirection(info, p1, p2) \
   { \
     bdescr *bd; \
 \
     bd = Bdescr((P_)p1); \
-    if (bd->gen->no == 0) { \
+    if (bd->gen_no == 0) { \
       ((StgInd *)p1)->indirectee = p2; \
       SET_INFO(p1,&stg_IND_info); \
       TICK_UPD_NEW_IND(); \
@@ -150,41 +172,84 @@ recordOldToNewPtrs(StgMutClosure *p)
       ((StgIndOldGen *)p1)->indirectee = p2; \
       if (info != &stg_BLACKHOLE_BQ_info) { \
         ACQUIRE_LOCK(&sm_mutex); \
-        ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
-        bd->gen->mut_once_list = (StgMutClosure *)p1; \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
         RELEASE_LOCK(&sm_mutex); \
       } \
       SET_INFO(p1,&stg_IND_OLDGEN_info); \
       TICK_UPD_OLD_IND(); \
     } \
   }
+#elif defined(PROFILING)
+// @LDV profiling
+// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
+// which p1 resides.
+//
+// Note:
+// After all, we do *NOT* need to call LDV_recordCreate() for both IND and
+// IND_OLDGEN closures because they are inherently used.  But, it corrupts
+// the invariants that every closure keeps its creation time in the profiling
+// field.  So, we call LDV_recordCreate().
+
+#define updateWithIndirection(info, p1, p2) \
+  { \
+    bdescr *bd; \
+\
+    LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \
+    bd = Bdescr((P_)p1); \
+    if (bd->gen_no == 0) { \
+      ((StgInd *)p1)->indirectee = p2; \
+      SET_INFO(p1,&stg_IND_info); \
+      LDV_recordCreate((p1)); \
+      TICK_UPD_NEW_IND(); \
+    } else { \
+      ((StgIndOldGen *)p1)->indirectee = p2; \
+      if (info != &stg_BLACKHOLE_BQ_info) { \
+        ACQUIRE_LOCK(&sm_mutex); \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
+        RELEASE_LOCK(&sm_mutex); \
+      } \
+      SET_INFO(p1,&stg_IND_OLDGEN_info); \
+      LDV_recordCreate((p1)); \
+    } \
+  }
+
 #else
 
 /* In the DEBUG case, we also zero out the slop of the old closure,
  * so that the sanity checker can tell where the next closure is.
+ *
+ * Two important invariants: we should never try to update a closure
+ * to point to itself, and the closure being updated should not
+ * already have been updated (the mutable list will get messed up
+ * otherwise).
  */
 #define updateWithIndirection(info, p1, p2) \
   { \
     bdescr *bd; \
 \
+    ASSERT( p1 != p2 && !closure_IND(p1) ); \
     bd = Bdescr((P_)p1); \
-    if (bd->gen->no == 0) { \
+    if (bd->gen_no == 0) { \
       ((StgInd *)p1)->indirectee = p2; \
       SET_INFO(p1,&stg_IND_info); \
       TICK_UPD_NEW_IND(); \
     } else { \
       if (info != &stg_BLACKHOLE_BQ_info) { \
-      { \
+        { \
           StgInfoTable *inf = get_itbl(p1); \
           nat np = inf->layout.payload.ptrs, \
               nw = inf->layout.payload.nptrs, i; \
-          for (i = np; i < np + nw; i++) { \
-            ((StgClosure *)p1)->payload[i] = 0; \
+          if (inf->type != THUNK_SELECTOR) { \
+            for (i = np; i < np + nw; i++) { \
+              ((StgClosure *)p1)->payload[i] = 0; \
+            } \
           } \
         } \
         ACQUIRE_LOCK(&sm_mutex); \
-        ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
-        bd->gen->mut_once_list = (StgMutClosure *)p1; \
+        ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
+        generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
         RELEASE_LOCK(&sm_mutex); \
       } \
       ((StgIndOldGen *)p1)->indirectee = p2; \
@@ -198,6 +263,7 @@ recordOldToNewPtrs(StgMutClosure *p)
  */
 #define updateWithStaticIndirection(info, p1, p2) \
   { \
+    ASSERT( p1 != p2 && !closure_IND(p1) ); \
     ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \
 \
     ACQUIRE_LOCK(&sm_mutex); \
@@ -216,20 +282,31 @@ updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *
 {
   bdescr *bd;
 
+  ASSERT( p1 != p2 && !closure_IND(p1) );
+
+  // @LDV profiling
+  // Destroy the old closure.
+  LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
   bd = Bdescr((P_)p1);
-  if (bd->gen->no == 0) {
+  if (bd->gen_no == 0) {
     ((StgInd *)p1)->indirectee = p2;
     SET_INFO(p1,&stg_IND_PERM_info);
+    // @LDV profiling
+    // We have just created a new closure.
+    LDV_recordCreate(p1);
     TICK_UPD_NEW_PERM_IND(p1);
   } else {
     ((StgIndOldGen *)p1)->indirectee = p2;
     if (info != &stg_BLACKHOLE_BQ_info) {
       ACQUIRE_LOCK(&sm_mutex);
-      ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;
-      bd->gen->mut_once_list = (StgMutClosure *)p1;
+      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
+      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
       RELEASE_LOCK(&sm_mutex);
     }
     SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
+    // @LDV profiling
+    // We have just created a new closure.
+    LDV_recordCreate(p1);
    TICK_UPD_OLD_PERM_IND();
  }
 }
@@ -239,25 +316,12 @@ updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *
    The CAF table - used to let us revert CAFs
    -------------------------------------------------------------------------- */
 
-#if defined(INTERPRETER)
-typedef struct StgCAFTabEntry_ {
-  StgClosure*   closure;
-  StgInfoTable* origItbl;
-} StgCAFTabEntry;
-
-extern void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl );
-extern void clearECafTable ( void );
-
-extern StgCAF*         ecafList;
-extern StgCAFTabEntry* ecafTable;
-extern StgInt          usedECafTable;
-extern StgInt          sizeECafTable;
-#endif
+void revertCAFs( void );
 
 #if defined(DEBUG)
 void printMutOnceList(generation *gen);
 void printMutableList(generation *gen);
-#endif DEBUG
+#endif /* DEBUG */
 
 /* --------------------------------------------------------------------------
    Address space layout macros
@@ -342,7 +406,6 @@ void printMutableList(generation *gen);
 extern void* TEXT_SECTION_END_MARKER_DECL;
 extern void* DATA_SECTION_END_MARKER_DECL;
-#if defined(INTERPRETER) || defined(GHCI)
 /* Take into account code sections in dynamically loaded object files.
  */
 #define IS_CODE_PTR(p) ( ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \
                       || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
@@ -351,11 +414,6 @@ extern void* DATA_SECTION_END_MARKER_DECL;
                       || is_dynamically_loaded_rwdata_ptr((char *)p) )
 #define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \
                       && is_not_dynamically_loaded_ptr((char *)p) )
-#else
-#define IS_CODE_PTR(p) ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER)
-#define IS_DATA_PTR(p) ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && (P_)(p) < (P_)&DATA_SECTION_END_MARKER)
-#define IS_USER_PTR(p) ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER)
-#endif
 
 /* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
  * during GC.  It needs to be FAST.
@@ -456,7 +514,7 @@ extern int is_heap_alloced(const void* x);
 
    LOOKS_LIKE_STATIC() - distinguishes between static and heap allocated data.
  */
-#if defined(ENABLE_WIN32_DLL_SUPPORT) && !defined(INTERPRETER)
+#if defined(ENABLE_WIN32_DLL_SUPPORT)
 /* definitely do not enable for mingw DietHEP */
 #define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r)))
 
@@ -486,20 +544,9 @@ extern int is_heap_alloced(const void* x);
    infotables for constructors on the (writable) C heap.
    -------------------------------------------------------------------------- */
 
-#ifdef INTERPRETER
-# ifdef USE_MINIINTERPRETER
-  /* yoiks: one of the dreaded pointer equality tests */
-# define IS_HUGS_CONSTR_INFO(info) \
-    (((StgInfoTable *)(info))->entry == (StgFunPtr)&Hugs_CONSTR_entry)
-# else
-# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
-# endif
-#elif GHCI
-  /* not accurate by any means, but stops the assertions failing... */
-# define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info)
-#else
-# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
-#endif
+/* not accurate by any means, but stops the assertions failing... */
+/* TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO */
+#define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info)
 
 /* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but
  * Certainly not as often as HEAP_ALLOCED.
@@ -516,16 +563,13 @@ extern int is_heap_alloced(const void* x);
    Macros for calculating how big a closure will be (used during allocation)
    -------------------------------------------------------------------------- */
 
-/* ToDo: replace unsigned int by nat.  The only fly in the ointment is that
- * nat comes from Rts.h which many folk dont include.  Sigh!
- */
-static __inline__ StgOffset AP_sizeW ( unsigned int n_args )
+static __inline__ StgOffset AP_sizeW ( nat n_args )
 { return sizeofW(StgAP_UPD) + n_args; }
 
-static __inline__ StgOffset PAP_sizeW ( unsigned int n_args )
+static __inline__ StgOffset PAP_sizeW ( nat n_args )
 { return sizeofW(StgPAP) + n_args; }
 
-static __inline__ StgOffset CONSTR_sizeW( unsigned int p, unsigned int np )
+static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
 { return sizeofW(StgHeader) + p + np; }
 
 static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
@@ -534,9 +578,6 @@ static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
 static __inline__ StgOffset BLACKHOLE_sizeW ( void )
 { return sizeofW(StgHeader) + MIN_UPD_SIZE; }
 
-static __inline__ StgOffset CAF_sizeW ( void )
-{ return sizeofW(StgCAF); }
-
 /* --------------------------------------------------------------------------
  * Sizes of closures
  * ------------------------------------------------------------------------*/
@@ -558,5 +599,5 @@ static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
 
 static __inline__ StgWord tso_sizeW ( StgTSO *tso )
 { return TSO_STRUCT_SIZEW + tso->stack_size; }
 
-#endif STORAGE_H
+#endif /* STORAGE_H */
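
For readers who are not familiar with the storage-manager interface this patch touches, the sketch below (not part of the patch) illustrates how RTS code is expected to drive the declarations above: allocate() and allocatePinned() return n contiguous words, the caller performs the matching TICK_ALLOC_XXX() call for the ticky-ticky profiler, doYouWantToGC() reports when the allocation area is exhausted, and GarbageCollect() takes a root-marking callback of type evac_fn plus a force-major-GC flag. The helper names (exampleGetRoots, exampleAllocateWords, exampleCheckGC) are invented for illustration and do not exist in the RTS.

/* Illustrative sketch only -- not part of the patch. */
#include "Rts.h"
#include "Storage.h"

/* Invented root-marking callback; the real RTS passes the scheduler's own
 * routine here.  'evac' is applied to every root the caller knows about. */
static void exampleGetRoots( evac_fn evac )
{
    (void)evac;                 /* no roots in this toy example */
}

/* Invented helper: grab n contiguous words.  Closure initialisation and the
 * matching TICK_ALLOC_XXX() ticky-ticky call are left to the caller. */
static StgPtr exampleAllocateWords( nat n, rtsBool pinned )
{
    /* allocatePinned() is only for objects the GC must not move
     * (currently just ByteArrays); everything else uses allocate(). */
    return pinned ? allocatePinned(n) : allocate(n);
}

/* Invented helper: between allocations, ask the storage manager whether the
 * allocation area is exhausted and, if so, run a (minor) collection. */
static void exampleCheckGC( void )
{
    if (doYouWantToGC()) {
        GarbageCollect( exampleGetRoots, rtsFalse );
    }
}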