typedef struct step_ {
unsigned int no; // step number in this generation
unsigned int abs_no; // absolute step number
- int is_compacted; // compact this step? (old gen only)
struct generation_ * gen; // generation this step belongs to
unsigned int gen_no; // generation number (cached)
bdescr * large_objects; // large objects (doubly linked)
unsigned int n_large_blocks; // no. of blocks used by large objs
+ StgTSO * threads; // threads in this step
+ // linked via global_link
// ------------------------------------
// Fields below are used during GC only
// and scavenged_large_objects
#endif
+ int mark; // mark (not copy)? (old gen only)
+ int compact; // compact (not sweep)? (old gen only)
+
bdescr * old_blocks; // bdescr of first from-space block
unsigned int n_old_blocks; // number of blocks in from-space
+ unsigned int live_estimate; // for sweeping: estimate of live data
bdescr * todos; // blocks waiting to be scavenged
bdescr * todos_last; // last block in the todos list (tail, for appending)
bdescr * bitmap; // bitmap for compacting collection
+ StgTSO * old_threads; // the threads list as it was at the start of GC
} step;
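
To make the new per-step thread list concrete (illustration only, not part of the patch): each step now carries its own list of TSOs, chained through global_link and, by the usual RTS convention, assumed here to be terminated by END_TSO_QUEUE. A minimal traversal sketch under that assumption, with a hypothetical helper name:

/* Hypothetical helper: apply f to every thread belonging to one step. */
static void forEachThreadInStep (step *stp, void (*f)(StgTSO *))
{
    StgTSO *t;
    for (t = stp->threads; t != END_TSO_QUEUE; t = t->global_link) {
        f(t);
    }
}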
-------------------------------------------------------------------------- */
-extern StgPtr allocate ( nat n );
-extern StgPtr allocateInGen ( generation *g, nat n );
-extern StgPtr allocateLocal ( Capability *cap, nat n );
-extern StgPtr allocatePinned ( nat n );
+extern StgPtr allocate ( lnat n );
+extern StgPtr allocateInGen ( generation *g, lnat n );
+extern StgPtr allocateLocal ( Capability *cap, lnat n );
+extern StgPtr allocatePinned ( lnat n );
extern lnat allocatedBytes ( void );
extern bdescr * RTS_VAR(small_alloc_list);
}
/* memory allocator for executable memory */
-extern void *allocateExec (nat bytes);
+extern void* allocateExec(unsigned int len, void **exec_addr);
extern void freeExec (void *p);
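
A rough sketch of how the new allocateExec signature is meant to be used (not taken from the patch): the returned pointer is assumed to be the writable view of the memory and *exec_addr the executable view, which may differ when the OS forbids mappings that are both writable and executable. The helper name and the code_bytes/code_len parameters below are made up for illustration:

/* Hypothetical caller: copy generated code via the writable mapping and
   hand back the executable address for later calls. */
static void *installCode (const void *code_bytes, unsigned int code_len)
{
    void *exec_ptr;
    void *wr_ptr = allocateExec(code_len, &exec_ptr);
    if (wr_ptr == NULL) return NULL;
    memcpy(wr_ptr, code_bytes, code_len);  /* write through the writable view */
    return exec_ptr;                       /* callers jump through this view */
}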
/* for splitting block groups in two */
-------------------------------------------------------------------------- */
-extern void GarbageCollect(rtsBool force_major_gc);
+extern void GarbageCollect(rtsBool force_major_gc, nat gc_type, Capability *cap);
/* -----------------------------------------------------------------------------
Generational garbage collection support
#if defined(THREADED_RTS)
extern Mutex sm_mutex;
extern Mutex atomic_modify_mutvar_mutex;
-extern SpinLock recordMutableGen_sync;
#endif
#if defined(THREADED_RTS)
#define ASSERT_SM_LOCK()
#endif
+#if !IN_STG_CODE
+
INLINE_HEADER void
-recordMutableGen(StgClosure *p, generation *gen)
+recordMutableGen(StgClosure *p, nat gen_no)
{
bdescr *bd;
- bd = gen->mut_list;
+ bd = generations[gen_no].mut_list;
if (bd->free >= bd->start + BLOCK_SIZE_W) {
bdescr *new_bd;
new_bd = allocBlock();
new_bd->link = bd;
bd = new_bd;
- gen->mut_list = bd;
+ generations[gen_no].mut_list = bd;
}
*bd->free++ = (StgWord)p;
}
INLINE_HEADER void
-recordMutableGenLock(StgClosure *p, generation *gen)
+recordMutableGenLock(StgClosure *p, nat gen_no)
{
ACQUIRE_SM_LOCK;
- recordMutableGen(p,gen);
+ recordMutableGen(p,gen_no);
RELEASE_SM_LOCK;
}
-extern bdescr *allocBlock_sync(void);
-
-// Version of recordMutableGen() for use in parallel GC. The same as
-// recordMutableGen(), except that we surround it with a spinlock and
-// call the spinlock version of allocBlock().
-INLINE_HEADER void
-recordMutableGen_GC(StgClosure *p, generation *gen)
-{
- bdescr *bd;
-
- ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
-
- bd = gen->mut_list;
- if (bd->free >= bd->start + BLOCK_SIZE_W) {
- bdescr *new_bd;
- new_bd = allocBlock_sync();
- new_bd->link = bd;
- bd = new_bd;
- gen->mut_list = bd;
- }
- *bd->free++ = (StgWord)p;
-
- RELEASE_SPIN_LOCK(&recordMutableGen_sync);
-}
-
INLINE_HEADER void
recordMutable(StgClosure *p)
{
bdescr *bd;
ASSERT(closure_MUTABLE(p));
bd = Bdescr((P_)p);
- if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
+ if (bd->gen_no > 0) recordMutableGen(p, bd->gen_no);
}
INLINE_HEADER void
RELEASE_SM_LOCK;
}
+#endif // !IN_STG_CODE
+
/* -----------------------------------------------------------------------------
The CAF table - used to let us revert CAFs in GHCi
-------------------------------------------------------------------------- */
make sense...
-------------------------------------------------------------------------- */
-#define LOOKS_LIKE_INFO_PTR(p) \
- (p && LOOKS_LIKE_INFO_PTR_NOT_NULL(p))
-
-#define LOOKS_LIKE_INFO_PTR_NOT_NULL(p) \
- (((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type != INVALID_OBJECT && \
- ((StgInfoTable *)(INFO_PTR_TO_STRUCT(p)))->type < N_CLOSURE_TYPES)
-
-#define LOOKS_LIKE_CLOSURE_PTR(p) \
- (LOOKS_LIKE_INFO_PTR((UNTAG_CLOSURE((StgClosure *)(p)))->header.info))
+INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p);
+INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p); // XXX StgClosure*
/* -----------------------------------------------------------------------------
Macros for calculating how big a closure will be (used during allocation)
extern StgClosure * RTS_VAR(revertible_caf_list);
extern StgTSO * RTS_VAR(resurrected_threads);
+#define IS_FORWARDING_PTR(p) ((((StgWord)p) & 1) != 0)
+#define MK_FORWARDING_PTR(p) (((StgWord)p) | 1)
+#define UN_FORWARDING_PTR(p) (((StgWord)p) - 1)
+
+INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR_NOT_NULL (StgWord p)
+{
+ StgInfoTable *info = INFO_PTR_TO_STRUCT(p);
+ return info->type != INVALID_OBJECT && info->type < N_CLOSURE_TYPES;
+}
+
+INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p)
+{
+ return p && (IS_FORWARDING_PTR(p) || LOOKS_LIKE_INFO_PTR_NOT_NULL(p));
+}
+
+INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p)
+{
+ return LOOKS_LIKE_INFO_PTR((StgWord)(UNTAG_CLOSURE((StgClosure *)(p)))->header.info);
+}
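+
+For orientation (illustrative, not from the patch): these helpers exist because, during evacuation, the GC overwrites an object's info pointer with the address of its copy, tagged in the low bit, so a word read from a header may be either a real info pointer or a forwarding pointer; LOOKS_LIKE_INFO_PTR above therefore accepts both. A small sketch of the intended pattern, where from and to are assumed to be closure pointers:
+
+/* Hypothetical reader: if 'from' has already been evacuated, return the
+   address of its copy, otherwise return 'from' itself. */
+INLINE_HEADER StgClosure *followForwarding (StgClosure *from)
+{
+    StgWord w = (StgWord)from->header.info;
+    if (IS_FORWARDING_PTR(w)) {
+        return (StgClosure *)UN_FORWARDING_PTR(w);
+    }
+    return from;
+}
+
+/* Writer side (after copying 'from' to 'to'):
+     from->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);   */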
+
#endif /* STORAGE_H */