X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.h;h=c938fd8244a4b7ea41b2e6679d33809324ae0976;hb=3c6b9911369deda84fcc74a31372e6f51e0cb054;hp=4ba2731a27b8a793f462043d71370a2c001ba5c5;hpb=c887767d3ffa9d5ce292d38b0c091b6a23d66957;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.h b/ghc/rts/Storage.h
index 4ba2731..c938fd8 100644
--- a/ghc/rts/Storage.h
+++ b/ghc/rts/Storage.h
@@ -1,7 +1,7 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.20 2000/12/19 12:51:58 simonmar Exp $
+ * $Id: Storage.h,v 1.52 2003/04/22 16:25:12 simonmar Exp $
  *
- * (c) The GHC Team, 1998-1999
+ * (c) The GHC Team, 1998-2002
  *
  * External Storage Manager Interface
  *
@@ -11,8 +11,12 @@
 #define STORAGE_H
 
 #include "Block.h"
+#include "MBlock.h"
 #include "BlockAlloc.h"
 #include "StoragePriv.h"
+#ifdef PROFILING
+#include "LdvProfile.h"
+#endif
 
 /* -----------------------------------------------------------------------------
    Initialisation / De-initialisation
@@ -24,12 +28,24 @@ extern void exitStorage(void);
 /* -----------------------------------------------------------------------------
    Generic allocation
 
-   StgPtr allocate(int n)       Allocates a chunk of contiguous store
+   StgPtr allocate(nat n)       Allocates a chunk of contiguous store
                                 n words long, returning a pointer to
                                 the first word.  Always succeeds.
 
+   StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
+                                n words long, which is at a fixed
+                                address (won't be moved by GC).
+                                Returns a pointer to the first word.
+                                Always succeeds.
+
+                                NOTE: the GC can't in general handle
+                                pinned objects, so allocatePinned()
+                                can only be used for ByteArrays at the
+                                moment.
+
    Don't forget to TICK_ALLOC_XXX(...)
-   after calling allocate, for the
+   after calling allocate or
+   allocatePinned, for the
    benefit of the ticky-ticky profiler.
 
   rtsBool doYouWantToGC(void)  Returns True if the storage manager is
@@ -37,18 +53,21 @@ extern void exitStorage(void);
   lnat  allocated_bytes(void)  Returns the number of bytes allocated
                                via allocate() since the last GC.
-                               Used in the reoprting of statistics.
+                               Used in the reporting of statistics.
 
   SMP: allocate and doYouWantToGC can be used from STG code; they are
   surrounded by a mutex.
   -------------------------------------------------------------------------- */
 
-extern StgPtr  allocate(nat n);
-static inline rtsBool doYouWantToGC(void)
+extern StgPtr  allocate        ( nat n );
+extern StgPtr  allocatePinned  ( nat n );
+extern lnat    allocated_bytes ( void );
+
+static inline rtsBool
+doYouWantToGC( void )
 {
   return (alloc_blocks >= alloc_blocks_lim);
 }
-extern lnat allocated_bytes(void);
 
 /* -----------------------------------------------------------------------------
    ExtendNursery(hp,hplim)      When hplim is reached, try to grab
@@ -59,14 +78,12 @@ extern lnat allocated_bytes(void);
   -------------------------------------------------------------------------- */
 
 #define ExtendNursery(hp,hplim)                 \
-  (CurrentNursery->free = (P_)(hp)+1,           \
+  (CloseNursery(hp),                            \
    CurrentNursery->link == NULL ? rtsFalse :    \
    (CurrentNursery = CurrentNursery->link,      \
     OpenNursery(hp,hplim),                      \
     rtsTrue))
 
-extern void PleaseStopAllocating(void);
-
 /* -----------------------------------------------------------------------------
    Performing Garbage Collection
 
@@ -78,8 +95,7 @@ extern void PleaseStopAllocating(void);
   MarkRoot(StgClosure *p)      Returns the new location of the root.
  -------------------------------------------------------------------------- */
 
-extern void GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc);
-extern StgClosure *MarkRoot(StgClosure *p);
+extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
 
 /* -----------------------------------------------------------------------------
    Generational garbage collection support
 
@@ -98,6 +114,21 @@ extern StgClosure *MarkRoot(StgClosure *p);
 
   -------------------------------------------------------------------------- */
 
+/*
+ * Storage manager mutex
+ */
+#if defined(SMP)
+extern Mutex sm_mutex;
+#define ACQUIRE_SM_LOCK   ACQUIRE_LOCK(&sm_mutex)
+#define RELEASE_SM_LOCK   RELEASE_LOCK(&sm_mutex)
+#else
+#define ACQUIRE_SM_LOCK
+#define RELEASE_SM_LOCK
+#endif
+
+/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
+ * kind of lock in the SMP case?
+ */
 static inline void
 recordMutable(StgMutClosure *p)
 {
@@ -110,9 +141,9 @@ recordMutable(StgMutClosure *p)
 #endif
 
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_list;
-    bd->gen->mut_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_list;
+    generations[bd->gen_no].mut_list = p;
   }
 }
 
@@ -122,266 +153,243 @@ recordOldToNewPtrs(StgMutClosure *p)
   bdescr *bd;
 
   bd = Bdescr((P_)p);
-  if (bd->gen->no > 0) {
-    p->mut_link = bd->gen->mut_once_list;
-    bd->gen->mut_once_list = p;
+  if (bd->gen_no > 0) {
+    p->mut_link = generations[bd->gen_no].mut_once_list;
+    generations[bd->gen_no].mut_once_list = p;
   }
 }
 
-#ifndef DEBUG
-#define updateWithIndirection(info, p1, p2)                             \
+// @LDV profiling
+// We zero out the slop when PROFILING is on.
+// #ifndef DEBUG
+#if !defined(DEBUG) && !defined(PROFILING)
+#define updateWithIndirection(info, ind_info, p1, p2, and_then)         \
   {                                                                     \
     bdescr *bd;                                                         \
                                                                         \
     bd = Bdescr((P_)p1);                                                \
-    if (bd->gen->no == 0) {                                             \
+    if (bd->gen_no == 0) {                                              \
      ((StgInd *)p1)->indirectee = p2;                                   \
-      SET_INFO(p1,&stg_IND_info);                                       \
+      SET_INFO(p1,ind_info);                                            \
      TICK_UPD_NEW_IND();                                                \
+      and_then;                                                         \
    } else {                                                             \
      ((StgIndOldGen *)p1)->indirectee = p2;                             \
      if (info != &stg_BLACKHOLE_BQ_info) {                              \
-       ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list;         \
-       bd->gen->mut_once_list = (StgMutClosure *)p1;                    \
+       ACQUIRE_SM_LOCK;                                                 \
+       ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
+       generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;     \
+       RELEASE_SM_LOCK;                                                 \
      }                                                                  \
      SET_INFO(p1,&stg_IND_OLDGEN_info);                                 \
      TICK_UPD_OLD_IND();                                                \
+      and_then;                                                         \
+    }                                                                   \
+  }
+#elif defined(PROFILING)
+// @LDV profiling
+// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
+// which p1 resides.
+//
+// Note:
+// Strictly speaking, we do *not* need to call LDV_recordCreate() for IND and
+// IND_OLDGEN closures, because they are inherently "used".  Omitting the call,
+// however, would break the invariant that every closure keeps its creation
+// time in the profiling field, so we call LDV_recordCreate() anyway.
+ +#define updateWithIndirection(info, ind_info, p1, p2, and_then) \ + { \ + bdescr *bd; \ + \ + LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \ + bd = Bdescr((P_)p1); \ + if (bd->gen_no == 0) { \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO(p1,ind_info); \ + LDV_recordCreate((p1)); \ + TICK_UPD_NEW_IND(); \ + and_then; \ + } else { \ + ((StgIndOldGen *)p1)->indirectee = p2; \ + if (info != &stg_BLACKHOLE_BQ_info) { \ + ACQUIRE_SM_LOCK; \ + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + } \ + SET_INFO(p1,&stg_IND_OLDGEN_info); \ + LDV_recordCreate((p1)); \ + and_then; \ } \ } + #else /* In the DEBUG case, we also zero out the slop of the old closure, * so that the sanity checker can tell where the next closure is. + * + * Two important invariants: we should never try to update a closure + * to point to itself, and the closure being updated should not + * already have been updated (the mutable list will get messed up + * otherwise). */ -#define updateWithIndirection(info, p1, p2) \ +#define updateWithIndirection(info, ind_info, p1, p2, and_then) \ { \ bdescr *bd; \ \ + ASSERT( p1 != p2 && !closure_IND(p1) ); \ bd = Bdescr((P_)p1); \ - if (bd->gen->no == 0) { \ + if (bd->gen_no == 0) { \ ((StgInd *)p1)->indirectee = p2; \ - SET_INFO(p1,&stg_IND_info); \ + SET_INFO(p1,ind_info); \ TICK_UPD_NEW_IND(); \ + and_then; \ } else { \ if (info != &stg_BLACKHOLE_BQ_info) { \ - { \ + { \ StgInfoTable *inf = get_itbl(p1); \ nat np = inf->layout.payload.ptrs, \ nw = inf->layout.payload.nptrs, i; \ - for (i = np; i < np + nw; i++) { \ - ((StgClosure *)p1)->payload[i] = 0; \ + if (inf->type != THUNK_SELECTOR) { \ + for (i = 0; i < np + nw; i++) { \ + ((StgClosure *)p1)->payload[i] = 0; \ + } \ } \ } \ - ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \ - bd->gen->mut_once_list = (StgMutClosure *)p1; \ + ACQUIRE_SM_LOCK; \ + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ } \ ((StgIndOldGen *)p1)->indirectee = p2; \ SET_INFO(p1,&stg_IND_OLDGEN_info); \ TICK_UPD_OLD_IND(); \ + and_then; \ } \ } #endif +/* Static objects all live in the oldest generation + */ +#define updateWithStaticIndirection(info, p1, p2) \ + { \ + ASSERT( p1 != p2 && !closure_IND(p1) ); \ + ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \ + \ + ACQUIRE_SM_LOCK; \ + ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \ + oldest_gen->mut_once_list = (StgMutClosure *)p1; \ + RELEASE_SM_LOCK; \ + \ + ((StgInd *)p1)->indirectee = p2; \ + SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \ + TICK_UPD_STATIC_IND(); \ + } + #if defined(TICKY_TICKY) || defined(PROFILING) static inline void updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2) { bdescr *bd; + ASSERT( p1 != p2 && !closure_IND(p1) ); + +#ifdef PROFILING + // @LDV profiling + // Destroy the old closure. + // Nb: LDV_* stuff cannot mix with ticky-ticky + LDV_recordDead_FILL_SLOP_DYNAMIC(p1); +#endif bd = Bdescr((P_)p1); - if (bd->gen->no == 0) { + if (bd->gen_no == 0) { ((StgInd *)p1)->indirectee = p2; SET_INFO(p1,&stg_IND_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We have just created a new closure. 
+ LDV_recordCreate(p1); +#endif TICK_UPD_NEW_PERM_IND(p1); } else { ((StgIndOldGen *)p1)->indirectee = p2; if (info != &stg_BLACKHOLE_BQ_info) { - ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; - bd->gen->mut_once_list = (StgMutClosure *)p1; + ACQUIRE_SM_LOCK; + ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; + generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; + RELEASE_SM_LOCK; } SET_INFO(p1,&stg_IND_OLDGEN_PERM_info); +#ifdef PROFILING + // @LDV profiling + // We have just created a new closure. + LDV_recordCreate(p1); +#endif TICK_UPD_OLD_PERM_IND(); } } #endif /* ----------------------------------------------------------------------------- - The CAF table - used to let us revert CAFs + The CAF table - used to let us revert CAFs in GHCi -------------------------------------------------------------------------- */ -#if defined(INTERPRETER) -typedef struct StgCAFTabEntry_ { - StgClosure* closure; - StgInfoTable* origItbl; -} StgCAFTabEntry; - -extern void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl ); -extern void clearECafTable ( void ); - -extern StgCAF* ecafList; -extern StgCAFTabEntry* ecafTable; -extern StgInt usedECafTable; -extern StgInt sizeECafTable; -#endif - -#if defined(DEBUG) -void printMutOnceList(generation *gen); -void printMutableList(generation *gen); -#endif DEBUG +void revertCAFs( void ); /* ----------------------------------------------------------------------------- - Macros for distinguishing data pointers from code pointers - -------------------------------------------------------------------------- */ -/* - * We use some symbols inserted automatically by the linker to decide - * whether a pointer points to text, data, or user space. These tests - * assume that text is lower in the address space than data, which in - * turn is lower than user allocated memory. - * - * If this assumption is false (say on some strange architecture) then - * the tests IS_CODE_PTR and IS_DATA_PTR below will need to be - * modified (and that should be all that's necessary). - * - * _start } start of read-only text space - * _etext } end of read-only text space - * _end } end of read-write data space - */ -extern StgFun start; - -extern void* TEXT_SECTION_END_MARKER_DECL; -extern void* DATA_SECTION_END_MARKER_DECL; - -#if defined(INTERPRETER) || defined(GHCI) -/* Take into account code sections in dynamically loaded object files. */ -#define IS_CODE_PTR(p) ( ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \ - || is_dynamically_loaded_code_or_rodata_ptr((char *)p) ) -#define IS_DATA_PTR(p) ( ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && \ - (P_)(p) < (P_)&DATA_SECTION_END_MARKER) \ - || is_dynamically_loaded_rwdata_ptr((char *)p) ) -#define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \ - && is_not_dynamically_loaded_ptr((char *)p) ) -#else -#define IS_CODE_PTR(p) ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) -#define IS_DATA_PTR(p) ((P_)(p) >= (P_)&TEXT_SECTION_END_MARKER && (P_)(p) < (P_)&DATA_SECTION_END_MARKER) -#define IS_USER_PTR(p) ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) -#endif + DEBUGGING predicates for pointers -/* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE - * during GC. It needs to be FAST. - */ -#ifdef TEXT_BEFORE_HEAP -# define HEAP_ALLOCED(x) ((StgPtr)(x) >= (StgPtr)(HEAP_BASE)) -#else -extern int is_heap_alloced(const void* x); -# define HEAP_ALLOCED(x) (is_heap_alloced(x)) -#endif - -/* When working with Win32 DLLs, static closures are identified by - being prefixed with a zero word. 
This is needed so that we can - distinguish between pointers to static closures and (reversed!) - info tables. - - This 'scheme' breaks down for closure tables such as CHARLIKE, - so we catch these separately. - - LOOKS_LIKE_STATIC_CLOSURE() - - discriminates between static closures and info tbls - (needed by LOOKS_LIKE_GHC_INFO() below - [Win32 DLLs only.]) - LOOKS_LIKE_STATIC() - - distinguishes between static and heap allocated data. - */ -#if defined(ENABLE_WIN32_DLL_SUPPORT) && !defined(INTERPRETER) - /* definitely do not enable for mingw DietHEP */ -#define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r))) - -/* Tiresome predicates needed to check for pointers into the closure tables */ -#define IS_CHARLIKE_CLOSURE(p) \ - ( (P_)(p) >= (P_)stg_CHARLIKE_closure && \ - (char*)(p) <= ((char*)stg_CHARLIKE_closure + \ - (MAX_CHARLIKE-MIN_CHARLIKE) * sizeof(StgIntCharlikeClosure)) ) -#define IS_INTLIKE_CLOSURE(p) \ - ( (P_)(p) >= (P_)stg_INTLIKE_closure && \ - (char*)(p) <= ((char*)stg_INTLIKE_closure + \ - (MAX_INTLIKE-MIN_INTLIKE) * sizeof(StgIntCharlikeClosure)) ) - -#define LOOKS_LIKE_STATIC_CLOSURE(r) (((*(((unsigned long *)(r))-1)) == 0) || IS_CHARLIKE_CLOSURE(r) || IS_INTLIKE_CLOSURE(r)) -#else -#define LOOKS_LIKE_STATIC(r) IS_DATA_PTR(r) -#define LOOKS_LIKE_STATIC_CLOSURE(r) IS_DATA_PTR(r) -#endif + LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr + LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr + These macros are complete but not sound. That is, they might + return false positives. Do not rely on them to distinguish info + pointers from closure pointers, for example. -/* ----------------------------------------------------------------------------- - Macros for distinguishing infotables from closures. - - You'd think it'd be easy to tell an info pointer from a closure pointer: - closures live on the heap and infotables are in read only memory. Right? - Wrong! Static closures live in read only memory and Hugs allocates - infotables for constructors on the (writable) C heap. + We don't use address-space predicates these days, for portability + reasons, and the fact that code/data can be scattered about the + address space in a dynamically-linked environment. Our best option + is to look at the alleged info table and see whether it seems to + make sense... -------------------------------------------------------------------------- */ -#ifdef INTERPRETER -# ifdef USE_MINIINTERPRETER - /* yoiks: one of the dreaded pointer equality tests */ -# define IS_HUGS_CONSTR_INFO(info) \ - (((StgInfoTable *)(info))->entry == (StgFunPtr)&Hugs_CONSTR_entry) -# else -# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */ -# endif -#elif GHCI - /* not accurate by any means, but stops the assertions failing... */ -# define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info) -#else -# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */ -#endif +#define LOOKS_LIKE_INFO_PTR(p) \ + (p && ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \ + ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES) -/* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but - * Certainly not as often as HEAP_ALLOCED. 
- */ -#ifdef TEXT_BEFORE_HEAP /* needed for mingw DietHEP */ -# define LOOKS_LIKE_GHC_INFO(info) IS_CODE_PTR(info) -#else -# define LOOKS_LIKE_GHC_INFO(info) (!HEAP_ALLOCED(info) \ - && !LOOKS_LIKE_STATIC_CLOSURE(info)) -#endif +#define LOOKS_LIKE_CLOSURE_PTR(p) \ + (LOOKS_LIKE_INFO_PTR(((StgClosure *)(p))->header.info)) /* ----------------------------------------------------------------------------- Macros for calculating how big a closure will be (used during allocation) -------------------------------------------------------------------------- */ -/* ToDo: replace unsigned int by nat. The only fly in the ointment is that - * nat comes from Rts.h which many folk dont include. Sigh! - */ -static __inline__ StgOffset AP_sizeW ( unsigned int n_args ) -{ return sizeofW(StgAP_UPD) + n_args; } +static __inline__ StgOffset PAP_sizeW ( nat n_args ) +{ return sizeofW(StgPAP) + n_args; } -static __inline__ StgOffset PAP_sizeW ( unsigned int n_args ) -{ return sizeofW(StgPAP) + n_args; } +static __inline__ StgOffset AP_STACK_sizeW ( nat size ) +{ return sizeofW(StgAP_STACK) + size; } -static __inline__ StgOffset CONSTR_sizeW( unsigned int p, unsigned int np ) +static __inline__ StgOffset CONSTR_sizeW( nat p, nat np ) { return sizeofW(StgHeader) + p + np; } -static __inline__ StgOffset BCO_sizeW ( unsigned int p, unsigned int np, unsigned int is ) -{ return sizeofW(StgBCO) + p + np + (is+sizeof(StgWord)-1)/sizeof(StgWord); } - -static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void ) +static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void ) { return sizeofW(StgHeader) + MIN_UPD_SIZE; } -static __inline__ StgOffset BLACKHOLE_sizeW ( void ) +static __inline__ StgOffset BLACKHOLE_sizeW ( void ) { return sizeofW(StgHeader) + MIN_UPD_SIZE; } -static __inline__ StgOffset CAF_sizeW ( void ) -{ return sizeofW(StgCAF); } - /* -------------------------------------------------------------------------- - * Sizes of closures - * ------------------------------------------------------------------------*/ + Sizes of closures + ------------------------------------------------------------------------*/ static __inline__ StgOffset sizeW_fromITBL( const StgInfoTable* itbl ) { return sizeofW(StgClosure) + sizeofW(StgPtr) * itbl->layout.payload.ptrs + sizeofW(StgWord) * itbl->layout.payload.nptrs; } +static __inline__ StgOffset ap_stack_sizeW( StgAP_STACK* x ) +{ return AP_STACK_sizeW(x->size); } + static __inline__ StgOffset pap_sizeW( StgPAP* x ) { return PAP_sizeW(x->n_args); } @@ -394,5 +402,50 @@ static __inline__ StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x ) static __inline__ StgWord tso_sizeW ( StgTSO *tso ) { return TSO_STRUCT_SIZEW + tso->stack_size; } -#endif STORAGE_H +static __inline__ StgWord bco_sizeW ( StgBCO *bco ) +{ return bco->size; } + +/* ----------------------------------------------------------------------------- + Sizes of stack frames + -------------------------------------------------------------------------- */ + +static inline StgWord stack_frame_sizeW( StgClosure *frame ) +{ + StgRetInfoTable *info; + + info = get_ret_itbl(frame); + switch (info->i.type) { + + case RET_DYN: + { + StgRetDyn *dyn = (StgRetDyn *)frame; + return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE + + RET_DYN_NONPTR_REGS_SIZE + + GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness); + } + + case RET_FUN: + return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size; + + case RET_BIG: + case RET_VEC_BIG: + return 1 + info->i.layout.large_bitmap->size; + + case RET_BCO: + return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]); + 
+ default: + return 1 + BITMAP_SIZE(info->i.layout.bitmap); + } +} + +/* ----------------------------------------------------------------------------- + Debugging bits + -------------------------------------------------------------------------- */ + +#if defined(DEBUG) +void printMutOnceList(generation *gen); +void printMutableList(generation *gen); +#endif +#endif // STORAGE_H
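
The updateWithIndirection and recordOldToNewPtrs changes above both turn on the same idea: an old-generation closure that is made to point at younger data must be chained onto its generation's mut_once_list, so that a minor GC (which only scans the young generations) still sees the new pointer. The following self-contained C sketch illustrates that write barrier in miniature; the types here (Closure, Generation) are invented stand-ins for this example only, not GHC's real StgInd/bdescr/generation structures, and info tables, SM locking, and profiling are all ignored.

#include <stdio.h>

#define N_GENERATIONS 2

typedef struct Closure_ {
    struct Closure_ *indirectee;  /* where the overwritten thunk now points */
    struct Closure_ *mut_link;    /* next entry on the mutable-once list */
    int              gen_no;      /* generation this closure lives in */
} Closure;

typedef struct {
    Closure *mut_once_list;  /* old-gen closures that point at younger data */
} Generation;

static Generation generations[N_GENERATIONS];

/* Overwrite p1 with an indirection to p2.  If p1 lives in an old
 * generation, chain it onto that generation's mutable-once list so the
 * next minor GC treats the new old-to-new pointer as a root. */
static void update_with_indirection(Closure *p1, Closure *p2)
{
    p1->indirectee = p2;
    if (p1->gen_no > 0) {
        p1->mut_link = generations[p1->gen_no].mut_once_list;
        generations[p1->gen_no].mut_once_list = p1;
    }
}

int main(void)
{
    Closure young = { NULL, NULL, 0 };  /* nursery object */
    Closure old   = { NULL, NULL, 1 };  /* old-generation thunk */

    update_with_indirection(&old, &young);

    /* `old` is now recorded on generation 1's mutable-once list, so a
     * minor GC that scans only generation 0 still finds `young`. */
    printf("old on mut_once_list: %s\n",
           generations[1].mut_once_list == &old ? "yes" : "no");
    return 0;
}

The "once" in mut_once_list reflects the design choice visible in the header: an indirection is written at most once per closure, so it need only be recorded once, whereas the genuinely mutable objects handled by recordMutable live on the separate mut_list and are re-scanned at every collection.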