/* -----------------------------------------------------------------------------
- * $Id: Storage.h,v 1.53 2003/11/12 17:49:11 sof Exp $
*
- * (c) The GHC Team, 1998-2002
+ * (c) The GHC Team, 1998-2004
*
 * External Storage Manager Interface
*
#ifndef STORAGE_H
#define STORAGE_H
-#include "Block.h"
-#include "MBlock.h"
-#include "BlockAlloc.h"
-#include "StoragePriv.h"
-#ifdef PROFILING
-#include "LdvProfile.h"
-#endif
+#include <stddef.h>
+
+/* -----------------------------------------------------------------------------
+ * Generational GC
+ *
+ * We support an arbitrary number of generations, with an arbitrary number
+ * of steps per generation. Notes (in no particular order):
+ *
+ * - all generations except the oldest should have two steps. This gives
+ * objects a decent chance to age before being promoted, and in
+ * particular will ensure that we don't end up with too many
+ * thunks being updated in older generations.
+ *
+ * - the oldest generation has one step. There's no point in aging
+ * objects in the oldest generation.
+ *
+ * - generation 0, step 0 (G0S0) is the allocation area. It is given
+ * a fixed set of blocks during initialisation, and these blocks
+ * are never freed.
+ *
+ * - during garbage collection, each step which is an evacuation
+ * destination (i.e. all steps except G0S0) is allocated a to-space.
+ * Evacuated objects are allocated into the step's to-space until
+ * GC is finished, when the original step's contents may be freed
+ * and replaced by the to-space.
+ *
+ * - the mutable list is per-generation (not per-step). G0 doesn't
+ * have one (since every garbage collection collects at least G0).
+ *
+ * - block descriptors contain pointers to both the step and the
+ * generation that the block belongs to, for convenience.
+ *
+ * - static objects are stored in per-generation lists. See GC.c for
+ * details of how we collect CAFs in the generational scheme.
+ *
+ * - large objects are per-step, and are promoted in the same way
+ * as small objects, except that we may allocate large objects into
+ * generation 1 initially.
+ *
+ * ------------------------------------------------------------------------- */
+
+typedef struct _step {
+ unsigned int no; /* step number */
+ bdescr * blocks; /* blocks in this step */
+ unsigned int n_blocks; /* number of blocks */
+ struct _step * to; /* destination step for live objects */
+ struct _generation * gen; /* generation this step belongs to */
+ unsigned int gen_no; /* generation number (cached) */
+ bdescr * large_objects; /* large objects (doubly linked) */
+ unsigned int n_large_blocks; /* no. of blocks used by large objs */
+ int is_compacted; /* compact this step? (old gen only) */
+
+ /* temporary use during GC: */
+ StgPtr hp; /* next free locn in to-space */
+ StgPtr hpLim; /* end of current to-space block */
+ bdescr * hp_bd; /* bdescr of current to-space block */
+ bdescr * to_blocks; /* bdescr of first to-space block */
+ unsigned int n_to_blocks; /* number of blocks in to-space */
+ bdescr * scan_bd; /* block currently being scanned */
+ StgPtr scan; /* scan pointer in current block */
+ bdescr * new_large_objects; /* large objects collected so far */
+ bdescr * scavenged_large_objects; /* live large objs after GC (d-link) */
+ unsigned int n_scavenged_large_blocks;/* size of above */
+ bdescr * bitmap; /* bitmap for compacting collection */
+} step;
+
+typedef struct _generation {
+ unsigned int no; /* generation number */
+ step * steps; /* steps */
+ unsigned int n_steps; /* number of steps */
+ unsigned int max_blocks; /* max blocks in step 0 */
+ StgMutClosure *mut_list; /* mut objects in this gen (not G0)*/
+ StgMutClosure *mut_once_list; /* objects that point to younger gens */
+
+ /* temporary use during GC: */
+ StgMutClosure * saved_mut_list;
+
+ /* stats information */
+ unsigned int collections;
+ unsigned int failed_promotions;
+} generation;
+
+extern generation * RTS_VAR(generations);
+
+extern generation * RTS_VAR(g0);
+extern step * RTS_VAR(g0s0);
+extern generation * RTS_VAR(oldest_gen);
/* -----------------------------------------------------------------------------
Initialisation / De-initialisation
extern StgPtr allocatePinned ( nat n );
extern lnat allocated_bytes ( void );
+extern bdescr * RTS_VAR(small_alloc_list);
+extern bdescr * RTS_VAR(large_alloc_list);
+extern bdescr * RTS_VAR(pinned_object_block);
+
+extern StgPtr RTS_VAR(alloc_Hp);
+extern StgPtr RTS_VAR(alloc_HpLim);
+
+extern nat RTS_VAR(alloc_blocks);
+extern nat RTS_VAR(alloc_blocks_lim);
+
INLINE_HEADER rtsBool
doYouWantToGC( void )
{
}
/* -----------------------------------------------------------------------------
- ExtendNursery(hp,hplim) When hplim is reached, try to grab
- some more allocation space. Returns
- False if the allocation space is
- exhausted, and the application should
- call GarbageCollect().
- -------------------------------------------------------------------------- */
-
-#define ExtendNursery(hp,hplim) \
- (CloseNursery(hp), \
- CurrentNursery->link == NULL ? rtsFalse : \
- (CurrentNursery = CurrentNursery->link, \
- OpenNursery(hp,hplim), \
- rtsTrue))
-
-/* -----------------------------------------------------------------------------
Performing Garbage Collection
GarbageCollect(get_roots) Performs a garbage collection.
bd = Bdescr((P_)p);
if (bd->gen_no > 0) {
- p->mut_link = generations[bd->gen_no].mut_list;
- generations[bd->gen_no].mut_list = p;
+ p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_list;
+ RTS_DEREF(generations)[bd->gen_no].mut_list = p;
}
}
bd = Bdescr((P_)p);
if (bd->gen_no > 0) {
- p->mut_link = generations[bd->gen_no].mut_once_list;
- generations[bd->gen_no].mut_once_list = p;
- }
-}
-
-// @LDV profiling
-// We zero out the slop when PROFILING is on.
-// #ifndef DEBUG
-#if !defined(DEBUG) && !defined(PROFILING)
-#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
- { \
- bdescr *bd; \
- \
- bd = Bdescr((P_)p1); \
- if (bd->gen_no == 0) { \
- ((StgInd *)p1)->indirectee = p2; \
- SET_INFO(p1,ind_info); \
- TICK_UPD_NEW_IND(); \
- and_then; \
- } else { \
- ((StgIndOldGen *)p1)->indirectee = p2; \
- if (info != &stg_BLACKHOLE_BQ_info) { \
- ACQUIRE_SM_LOCK; \
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
- RELEASE_SM_LOCK; \
- } \
- SET_INFO(p1,&stg_IND_OLDGEN_info); \
- TICK_UPD_OLD_IND(); \
- and_then; \
- } \
- }
-#elif defined(PROFILING)
-// @LDV profiling
-// We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
-// which p1 resides.
-//
-// Note:
-// After all, we do *NOT* need to call LDV_recordCreate() for both IND and
-// IND_OLDGEN closures because they are inherently used. But, it corrupts
-// the invariants that every closure keeps its creation time in the profiling
-// field. So, we call LDV_recordCreate().
-
-#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
- { \
- bdescr *bd; \
- \
- LDV_recordDead_FILL_SLOP_DYNAMIC((p1)); \
- bd = Bdescr((P_)p1); \
- if (bd->gen_no == 0) { \
- ((StgInd *)p1)->indirectee = p2; \
- SET_INFO(p1,ind_info); \
- LDV_recordCreate((p1)); \
- TICK_UPD_NEW_IND(); \
- and_then; \
- } else { \
- ((StgIndOldGen *)p1)->indirectee = p2; \
- if (info != &stg_BLACKHOLE_BQ_info) { \
- ACQUIRE_SM_LOCK; \
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
- RELEASE_SM_LOCK; \
- } \
- SET_INFO(p1,&stg_IND_OLDGEN_info); \
- LDV_recordCreate((p1)); \
- and_then; \
- } \
- }
-
-#else
-
-/* In the DEBUG case, we also zero out the slop of the old closure,
- * so that the sanity checker can tell where the next closure is.
- *
- * Two important invariants: we should never try to update a closure
- * to point to itself, and the closure being updated should not
- * already have been updated (the mutable list will get messed up
- * otherwise).
- */
-#define updateWithIndirection(info, ind_info, p1, p2, and_then) \
- { \
- bdescr *bd; \
- \
- ASSERT( p1 != p2 && !closure_IND(p1) ); \
- bd = Bdescr((P_)p1); \
- if (bd->gen_no == 0) { \
- ((StgInd *)p1)->indirectee = p2; \
- SET_INFO(p1,ind_info); \
- TICK_UPD_NEW_IND(); \
- and_then; \
- } else { \
- if (info != &stg_BLACKHOLE_BQ_info) { \
- { \
- StgInfoTable *inf = get_itbl(p1); \
- nat np = inf->layout.payload.ptrs, \
- nw = inf->layout.payload.nptrs, i; \
- if (inf->type != THUNK_SELECTOR) { \
- for (i = 0; i < np + nw; i++) { \
- ((StgClosure *)p1)->payload[i] = 0; \
- } \
- } \
- } \
- ACQUIRE_SM_LOCK; \
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
- RELEASE_SM_LOCK; \
- } \
- ((StgIndOldGen *)p1)->indirectee = p2; \
- SET_INFO(p1,&stg_IND_OLDGEN_info); \
- TICK_UPD_OLD_IND(); \
- and_then; \
- } \
- }
-#endif
-
-/* Static objects all live in the oldest generation
- */
-#define updateWithStaticIndirection(info, p1, p2) \
- { \
- ASSERT( p1 != p2 && !closure_IND(p1) ); \
- ASSERT( ((StgMutClosure*)p1)->mut_link == NULL ); \
- \
- ACQUIRE_SM_LOCK; \
- ((StgMutClosure *)p1)->mut_link = oldest_gen->mut_once_list; \
- oldest_gen->mut_once_list = (StgMutClosure *)p1; \
- RELEASE_SM_LOCK; \
- \
- ((StgInd *)p1)->indirectee = p2; \
- SET_INFO((StgInd *)p1, &stg_IND_STATIC_info); \
- TICK_UPD_STATIC_IND(); \
- }
-
-#if defined(TICKY_TICKY) || defined(PROFILING)
-INLINE_HEADER void
-updateWithPermIndirection(const StgInfoTable *info, StgClosure *p1, StgClosure *p2)
-{
- bdescr *bd;
-
- ASSERT( p1 != p2 && !closure_IND(p1) );
-
-#ifdef PROFILING
- // @LDV profiling
- // Destroy the old closure.
- // Nb: LDV_* stuff cannot mix with ticky-ticky
- LDV_recordDead_FILL_SLOP_DYNAMIC(p1);
-#endif
- bd = Bdescr((P_)p1);
- if (bd->gen_no == 0) {
- ((StgInd *)p1)->indirectee = p2;
- SET_INFO(p1,&stg_IND_PERM_info);
-#ifdef PROFILING
- // @LDV profiling
- // We have just created a new closure.
- LDV_recordCreate(p1);
-#endif
- TICK_UPD_NEW_PERM_IND(p1);
- } else {
- ((StgIndOldGen *)p1)->indirectee = p2;
- if (info != &stg_BLACKHOLE_BQ_info) {
- ACQUIRE_SM_LOCK;
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
- RELEASE_SM_LOCK;
- }
- SET_INFO(p1,&stg_IND_OLDGEN_PERM_info);
-#ifdef PROFILING
- // @LDV profiling
- // We have just created a new closure.
- LDV_recordCreate(p1);
-#endif
- TICK_UPD_OLD_PERM_IND();
+ p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_once_list;
+ RTS_DEREF(generations)[bd->gen_no].mut_once_list = p;
}
}
-#endif
/* -----------------------------------------------------------------------------
The CAF table - used to let us revert CAFs in GHCi
StgRetDyn *dyn = (StgRetDyn *)frame;
return sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
RET_DYN_NONPTR_REGS_SIZE +
- GET_PTRS(dyn->liveness) + GET_NONPTRS(dyn->liveness);
+ RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
}
case RET_FUN:
}
/* -----------------------------------------------------------------------------
- Debugging bits
+ Nursery manipulation
+ -------------------------------------------------------------------------- */
+
+extern void allocNurseries ( void );
+extern void resetNurseries ( void );
+extern bdescr * allocNursery ( bdescr *last_bd, nat blocks );
+extern void resizeNursery ( nat blocks );
+extern void tidyAllocateLists ( void );
+
+/* -----------------------------------------------------------------------------
+ MUTABLE LISTS
+ A mutable list is ended with END_MUT_LIST, so that we can use NULL
+ as an indication that an object is not on a mutable list.
+ ------------------------------------------------------------------------- */
+
+#define END_MUT_LIST ((StgMutClosure *)(void *)&stg_END_MUT_LIST_closure)
+
+/* -----------------------------------------------------------------------------
+ Functions from GC.c
+ -------------------------------------------------------------------------- */
+
+extern void threadPaused ( StgTSO * );
+extern StgClosure * isAlive ( StgClosure *p );
+extern void markCAFs ( evac_fn evac );
+
+/* -----------------------------------------------------------------------------
+ Stats 'n' DEBUG stuff
-------------------------------------------------------------------------- */
+extern lnat RTS_VAR(total_allocated);
+
+extern lnat calcAllocated ( void );
+extern lnat calcLive ( void );
+extern lnat calcNeeded ( void );
+
+#if defined(DEBUG)
+extern void memInventory(void);
+extern void checkSanity(void);
+extern nat countBlocks(bdescr *);
+#endif
+
#if defined(DEBUG)
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
#endif
+/* ----------------------------------------------------------------------------
+ Storage manager internal APIs and globals
+ ------------------------------------------------------------------------- */
+
+#define END_OF_STATIC_LIST stgCast(StgClosure*,1)
+
+extern void newDynCAF(StgClosure *);
+
+extern void move_TSO(StgTSO *src, StgTSO *dest);
+extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
+
+extern StgClosure * RTS_VAR(static_objects);
+extern StgClosure * RTS_VAR(scavenged_static_objects);
+extern StgWeak * RTS_VAR(old_weak_ptr_list);
+extern StgWeak * RTS_VAR(weak_ptr_list);
+extern StgClosure * RTS_VAR(caf_list);
+extern StgTSO * RTS_VAR(resurrected_threads);
+
#endif // STORAGE_H