/* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.24 2001/01/26 14:36:40 simonpj Exp $
+ * $Id: Storage.h,v 1.35 2001/07/24 06:31:36 ken Exp $
*
* (c) The GHC Team, 1998-1999
*
MarkRoot(StgClosure *p) Returns the new location of the root.
-------------------------------------------------------------------------- */
-extern void GarbageCollect(void (*get_roots)(void),rtsBool force_major_gc);
-extern StgClosure *MarkRoot(StgClosure *p);
-
-/* Temporary measure to ensure we retain all the dynamically-loaded CAFs */
-#ifdef GHCI
-extern void markCafs( void );
-#endif
+extern void GarbageCollect(void (*get_roots)(evac_fn),rtsBool force_major_gc);
/* -----------------------------------------------------------------------------
   Generational garbage collection support
   -------------------------------------------------------------------------- */

/*
 * Storage manager mutex: taken via ACQUIRE_LOCK/RELEASE_LOCK in the
 * update macros below when mutating a generation's mut_once_list.
 */
#ifdef SMP
extern pthread_mutex_t sm_mutex;
#endif

/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
 * kind of lock in the SMP case?
 */
#endif
bd = Bdescr((P_)p);
- if (bd->gen->no > 0) {
- p->mut_link = bd->gen->mut_list;
- bd->gen->mut_list = p;
+ if (bd->gen_no > 0) {
+ p->mut_link = generations[bd->gen_no].mut_list;
+ generations[bd->gen_no].mut_list = p;
}
}
bdescr *bd;
bd = Bdescr((P_)p);
- if (bd->gen->no > 0) {
- p->mut_link = bd->gen->mut_once_list;
- bd->gen->mut_once_list = p;
+ if (bd->gen_no > 0) {
+ p->mut_link = generations[bd->gen_no].mut_once_list;
+ generations[bd->gen_no].mut_once_list = p;
}
}
bdescr *bd; \
\
bd = Bdescr((P_)p1); \
- if (bd->gen->no == 0) { \
+ if (bd->gen_no == 0) { \
((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1,&stg_IND_info); \
TICK_UPD_NEW_IND(); \
((StgIndOldGen *)p1)->indirectee = p2; \
if (info != &stg_BLACKHOLE_BQ_info) { \
ACQUIRE_LOCK(&sm_mutex); \
- ((StgIndOldGen *)p1)->mut_link = bd->gen->mut_once_list; \
- bd->gen->mut_once_list = (StgMutClosure *)p1; \
+ ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
+ generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
RELEASE_LOCK(&sm_mutex); \
} \
SET_INFO(p1,&stg_IND_OLDGEN_info); \
/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 */
#define updateWithIndirection(info, p1, p2)			\
  {								\
    bdescr *bd;							\
								\
    ASSERT( p1 != p2 && !closure_IND(p1) );			\
    bd = Bdescr((P_)p1);					\
    if (bd->gen_no == 0) {					\
      ((StgInd *)p1)->indirectee = p2;				\
      SET_INFO(p1,&stg_IND_info);				\
      TICK_UPD_NEW_IND();					\
    } else {							\
      if (info != &stg_BLACKHOLE_BQ_info) {			\
	{							\
	  StgInfoTable *inf = get_itbl(p1);			\
	  nat np = inf->layout.payload.ptrs,			\
	      nw = inf->layout.payload.nptrs, i;		\
	  /* don't zero a THUNK_SELECTOR's payload */		\
	  if (inf->type != THUNK_SELECTOR) {			\
	     for (i = np; i < np + nw; i++) {			\
		((StgClosure *)p1)->payload[i] = 0;		\
	     }							\
	  }							\
	}							\
	ACQUIRE_LOCK(&sm_mutex);				\
	((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
	generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
	RELEASE_LOCK(&sm_mutex);				\
      }								\
      ((StgIndOldGen *)p1)->indirectee = p2;			\
*/
#define updateWithStaticIndirection(info, p1, p2)		\
  {								\
    ASSERT( p1 != p2 && !closure_IND(p1) );			\
    ASSERT( ((StgMutClosure*)p1)->mut_link == NULL );		\
								\
    ACQUIRE_LOCK(&sm_mutex);					\
{
  bdescr *bd;

  ASSERT( p1 != p2 && !closure_IND(p1) );
  bd = Bdescr((P_)p1);
  if (bd->gen_no == 0) {
    ((StgInd *)p1)->indirectee = p2;
    SET_INFO(p1,&stg_IND_PERM_info);
    TICK_UPD_NEW_PERM_IND(p1);
    ((StgIndOldGen *)p1)->indirectee = p2;
    if (info != &stg_BLACKHOLE_BQ_info) {
      ACQUIRE_LOCK(&sm_mutex);
      ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
      generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
      RELEASE_LOCK(&sm_mutex);
    }
    SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
The CAF table - used to let us revert CAFs
-------------------------------------------------------------------------- */
-#if defined(INTERPRETER)
-typedef struct StgCAFTabEntry_ {
- StgClosure* closure;
- StgInfoTable* origItbl;
-} StgCAFTabEntry;
-
-extern void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl );
-extern void clearECafTable ( void );
-
-extern StgCAF* ecafList;
-extern StgCAFTabEntry* ecafTable;
-extern StgInt usedECafTable;
-extern StgInt sizeECafTable;
-#endif
+void revertCAFs( void );
#if defined(DEBUG)
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
-#endif DEBUG
+#endif /* DEBUG */
/* --------------------------------------------------------------------------
   Address space layout macros
extern void* TEXT_SECTION_END_MARKER_DECL;
extern void* DATA_SECTION_END_MARKER_DECL;

/* Take into account code sections in dynamically loaded object files. */
#define IS_CODE_PTR(p) ( ((P_)(p) < (P_)&TEXT_SECTION_END_MARKER) \
	     || is_dynamically_loaded_code_or_rodata_ptr((char *)p) )
	     || is_dynamically_loaded_rwdata_ptr((char *)p) )
#define IS_USER_PTR(p) ( ((P_)(p) >= (P_)&DATA_SECTION_END_MARKER) \
	     && is_not_dynamically_loaded_ptr((char *)p) )
/* The HEAP_ALLOCED test below is called FOR EVERY SINGLE CLOSURE
* during GC. It needs to be FAST.
LOOKS_LIKE_STATIC()
- distinguishes between static and heap allocated data.
*/
-#if defined(ENABLE_WIN32_DLL_SUPPORT) && !defined(INTERPRETER)
+#if defined(ENABLE_WIN32_DLL_SUPPORT)
/* definitely do not enable for mingw DietHEP */
#define LOOKS_LIKE_STATIC(r) (!(HEAP_ALLOCED(r)))
infotables for constructors on the (writable) C heap.
-------------------------------------------------------------------------- */
-#ifdef INTERPRETER
-# ifdef USE_MINIINTERPRETER
- /* yoiks: one of the dreaded pointer equality tests */
-# define IS_HUGS_CONSTR_INFO(info) \
- (((StgInfoTable *)(info))->entry == (StgFunPtr)&Hugs_CONSTR_entry)
-# else
-# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
-# endif
-#elif GHCI
- /* not accurate by any means, but stops the assertions failing... */
-# define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info)
-#else
-# define IS_HUGS_CONSTR_INFO(info) 0 /* ToDo: more than mildly bogus */
-#endif
+/* not accurate by any means, but stops the assertions failing... */
+/* TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO TODO */
+#define IS_HUGS_CONSTR_INFO(info) IS_USER_PTR(info)
/* LOOKS_LIKE_GHC_INFO is called moderately often during GC, but
* Certainly not as often as HEAP_ALLOCED.
Macros for calculating how big a closure will be (used during allocation)
-------------------------------------------------------------------------- */
-/* ToDo: replace unsigned int by nat. The only fly in the ointment is that
- * nat comes from Rts.h which many folk dont include. Sigh!
- */
-static __inline__ StgOffset AP_sizeW ( unsigned int n_args )
+static __inline__ StgOffset AP_sizeW ( nat n_args )
{ return sizeofW(StgAP_UPD) + n_args; }
-static __inline__ StgOffset PAP_sizeW ( unsigned int n_args )
+static __inline__ StgOffset PAP_sizeW ( nat n_args )
{ return sizeofW(StgPAP) + n_args; }
-static __inline__ StgOffset CONSTR_sizeW( unsigned int p, unsigned int np )
+static __inline__ StgOffset CONSTR_sizeW( nat p, nat np )
{ return sizeofW(StgHeader) + p + np; }
static __inline__ StgOffset THUNK_SELECTOR_sizeW ( void )
static __inline__ StgOffset BLACKHOLE_sizeW ( void )
{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
-static __inline__ StgOffset CAF_sizeW ( void )
-{ return sizeofW(StgCAF); }
-
/* --------------------------------------------------------------------------
* Sizes of closures
* ------------------------------------------------------------------------*/
static __inline__ StgWord tso_sizeW ( StgTSO *tso )
{ return TSO_STRUCT_SIZEW + tso->stack_size; }
-#endif STORAGE_H
+#endif /* STORAGE_H */