mkSeqInfoLabel = RtsLabel (RtsInfo SLIT("stg_seq_frame"))
mkIndStaticInfoLabel = RtsLabel (RtsInfo SLIT("stg_IND_STATIC"))
mkMainCapabilityLabel = RtsLabel (RtsData SLIT("MainCapability"))
-mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN"))
+mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN0"))
mkEMPTY_MVAR_infoLabel = RtsLabel (RtsInfo SLIT("stg_EMPTY_MVAR"))
mkTopTickyCtrLabel = RtsLabel (RtsData SLIT("top_ct"))
{ do lits <- sequence $4;
return $ map CmmStaticLit $
mkStaticClosure (mkRtsInfoLabelFS $3)
- dontCareCCS (map getLit lits) [] [] }
+ dontCareCCS (map getLit lits) [] [] [] }
-- arrays of closures required for the CHARLIKE & INTLIKE arrays
lits :: { [ExtFCode CmmExpr] }
staticClosure :: FastString -> FastString -> [CmmLit] -> ExtCode
staticClosure cl_label info payload
= code $ emitDataLits (mkRtsDataLabelFS cl_label) lits
- where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] []
+ where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] [] []
foreignCall
:: String
%
% (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
%
-% $Id: CgHeapery.lhs,v 1.42 2004/11/26 16:20:09 simonmar Exp $
+% $Id: CgHeapery.lhs,v 1.43 2005/02/10 13:01:53 simonmar Exp $
%
\section[CgHeapery]{Heap management functions}
import CgParallel ( staticGranHdr, staticParHdr, doGranAllocate )
import CgStackery ( getFinalStackHW, getRealSp )
import CgCallConv ( mkRegLiveness )
-import ClosureInfo ( closureSize, closureUpdReqd,
- staticClosureNeedsLink,
- mkConInfo,
+import ClosureInfo ( closureSize, staticClosureNeedsLink,
+ mkConInfo, closureNeedsUpdSpace,
infoTableLabelFromCI, closureLabelFromCI,
nodeMustPointToIt, closureLFInfo,
ClosureInfo )
-> [CmmLit] -- Payload
-> [CmmLit] -- The full closure
mkStaticClosureFields cl_info ccs caf_refs payload
- = mkStaticClosure info_lbl ccs payload padding_wds static_link_field
+ = mkStaticClosure info_lbl ccs payload padding_wds
+ static_link_field saved_info_field
where
info_lbl = infoTableLabelFromCI cl_info
- upd_reqd = closureUpdReqd cl_info
+ -- CAFs must have consistent layout, regardless of whether they
+ -- are actually updatable or not. The layout of a CAF is:
+ --
+ -- 3 saved_info
+ -- 2 static_link
+ -- 1 indirectee
+ -- 0 info ptr
+ --
+ -- the static_link and saved_info fields must always be in the same
+ -- place. So we use closureNeedsUpdSpace rather than
+ -- closureUpdReqd here:
+
+ is_caf = closureNeedsUpdSpace cl_info
- -- for the purposes of laying out the static closure, we consider all
- -- thunks to be "updatable", so that the static link field is always
- -- in the same place.
padding_wds
- | not upd_reqd = []
- | otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s
+ | not is_caf = []
+ | otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s
where n = max 0 (mIN_UPD_SIZE - length payload)
- -- We always have a static link field for a thunk, it's used to
- -- save the closure's info pointer when we're reverting CAFs
- -- (see comment in Storage.c)
static_link_field
- | upd_reqd || staticClosureNeedsLink cl_info = [static_link_value]
- | otherwise = []
+ | is_caf || staticClosureNeedsLink cl_info = [static_link_value]
+ | otherwise = []
+
+ saved_info_field
+ | is_caf = [mkIntCLit 0]
+ | otherwise = []
-- for a static constructor which has NoCafRefs, we set the
-- static link field to a non-zero value so the garbage
| otherwise = mkIntCLit 1
mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit]
- -> [CmmLit] -> [CmmLit] -> [CmmLit]
-mkStaticClosure info_lbl ccs payload padding_wds static_link_field
+ -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit]
+mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field
= [CmmLabel info_lbl]
++ variable_header_words
++ payload
++ padding_wds
++ static_link_field
+ ++ saved_info_field
where
variable_header_words
= staticGranHdr
-- #define unsafeFreezzeArrayzh(r,a)
-- {
--- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN_info);
+-- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN0_info);
-- r = a;
-- }
emitPrimOp [res] UnsafeFreezeArrayOp [arg] live
closureName, infoTableLabelFromCI,
closureLabelFromCI, closureSRT,
- closureLFInfo, closureSMRep, closureUpdReqd,
+ closureLFInfo, closureSMRep, closureUpdReqd,
+ closureNeedsUpdSpace,
closureSingleEntry, closureReEntrant, isConstrClosure_maybe,
closureFunInfo, isStandardFormThunk, isKnownFun,
/* -----------------------------------------------------------------------------
- * $Id: Block.h,v 1.17 2004/08/13 13:09:09 simonmar Exp $
+ * $Id: Block.h,v 1.18 2005/02/10 13:02:00 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
(1 + (W_)MBLOCK_ROUND_UP((n-BLOCKS_PER_MBLOCK) * BLOCK_SIZE) / MBLOCK_SIZE)
+#ifndef CMINUSMINUS
+// to the end...
+
/* Double-linked block lists: --------------------------------------------- */
-#ifndef CMINUSMINUS
INLINE_HEADER void
dbl_link_onto(bdescr *bd, bdescr **list)
{
}
*list = bd;
}
-#endif
+/* Initialisation ---------------------------------------------------------- */
+
+extern void initBlockAllocator(void);
+
+/* Allocation -------------------------------------------------------------- */
+
+extern bdescr *allocGroup(nat n);
+extern bdescr *allocBlock(void);
+
+/* De-Allocation ----------------------------------------------------------- */
+
+extern void freeGroup(bdescr *p);
+extern void freeChain(bdescr *p);
+
+/* Round a value to megablocks --------------------------------------------- */
+
+#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W)
+
+INLINE_HEADER nat
+round_to_mblocks(nat words)
+{
+ if (words > WORDS_PER_MBLOCK) {
+ if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) {
+ words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK;
+ } else {
+ words = ((words / WORDS_PER_MBLOCK) + 1) * WORDS_PER_MBLOCK;
+ }
+ }
+ return words;
+}
+
+#endif /* !CMINUSMINUS */
#endif /* BLOCK_H */
/* These macros are optimised versions of the above for certain
* closure types. They *must* be equivalent to the generic
* STATIC_LINK.
- *
- * You may be surprised that the STATIC_LINK field for a THUNK_STATIC
- * is at offset 2; that's because a THUNK_STATIC always has two words
- * of (non-ptr) padding, to make room for the IND_STATIC that is
- * going to overwrite it. It doesn't do any harm, because a
- * THUNK_STATIC needs this extra word for the IND_STATIC's saved_info
- * field anyhow. Hmm, this is all rather delicate. --SDM
*/
#define FUN_STATIC_LINK(p) ((p)->payload[0])
-#define THUNK_STATIC_LINK(p) ((p)->payload[2])
+#define THUNK_STATIC_LINK(p) ((p)->payload[1])
#define IND_STATIC_LINK(p) ((p)->payload[1])
#define STATIC_LINK2(info,p) \
/* ----------------------------------------------------------------------------
- * $Id: ClosureTypes.h,v 1.19 2004/11/18 09:56:17 tharris Exp $
+ * $Id: ClosureTypes.h,v 1.20 2005/02/10 13:02:02 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
#define MVAR 50
#define ARR_WORDS 51
#define MUT_ARR_PTRS 52
-#define MUT_ARR_PTRS_FROZEN 53
-#define MUT_VAR 54
-#define MUT_CONS 55
+#define MUT_ARR_PTRS_FROZEN0 53
+#define MUT_ARR_PTRS_FROZEN 54
+#define MUT_VAR 55
#define WEAK 56
#define FOREIGN 57
#define STABLE_NAME 58
#define CATCH_RETRY_FRAME 71
#define CATCH_STM_FRAME 72
#define N_CLOSURE_TYPES 73
-
+
#endif /* CLOSURETYPES_H */
struct StgClosure_ *payload[FLEXIBLE_ARRAY];
};
-/* What a stroke of luck - all our mutable closures follow the same
- * basic layout, with the mutable link field as the second field after
- * the header. This means the following structure is the supertype of
- * mutable closures.
- */
-
-typedef struct StgMutClosure_ {
- StgHeader header;
- StgWord padding;
- struct StgMutClosure_ *mut_link;
- struct StgClosure_ *payload[FLEXIBLE_ARRAY];
-} StgMutClosure;
-
typedef struct {
StgHeader header;
StgClosure *selectee;
} StgInd;
typedef struct {
- StgHeader header;
- StgClosure *indirectee;
- StgMutClosure *mut_link;
-} StgIndOldGen;
-
-typedef struct {
StgHeader header;
StgClosure *indirectee;
StgClosure *static_link;
typedef struct {
StgHeader header;
StgWord ptrs;
- StgMutClosure *mut_link; /* mutable list */
StgClosure *payload[FLEXIBLE_ARRAY];
} StgMutArrPtrs;
typedef struct {
StgHeader header;
StgClosure *var;
- StgMutClosure *mut_link;
} StgMutVar;
typedef struct _StgUpdateFrame {
typedef struct {
StgHeader header;
struct StgTSO_ *head;
- StgMutClosure *mut_link;
struct StgTSO_ *tail;
StgClosure* value;
} StgMVar;
typedef struct StgTVarWaitQueue_ {
StgHeader header;
struct StgTSO_ *waiting_tso;
- StgMutClosure *mut_link;
struct StgTVarWaitQueue_ *next_queue_entry;
struct StgTVarWaitQueue_ *prev_queue_entry;
} StgTVarWaitQueue;
typedef struct {
StgHeader header;
StgClosure *current_value;
- StgMutClosure *mut_link;
StgTVarWaitQueue *first_wait_queue_entry;
} StgTVar;
typedef struct StgTRecChunk_ {
StgHeader header;
struct StgTRecChunk_ *prev_chunk;
- StgMutClosure *mut_link;
StgWord next_entry_idx;
TRecEntry entries[TREC_CHUNK_NUM_ENTRIES];
} StgTRecChunk;
typedef struct StgTRecHeader_ {
StgHeader header;
TRecState state;
- StgMutClosure *mut_link;
struct StgTRecHeader_ *enclosing_trec;
StgTRecChunk *current_chunk;
} StgTRecHeader;
of closures that can be found on a blocking queue: StgTSO, StgRBHSave,
StgBlockedFetch. (StgRBHSave can only appear at the end of a blocking
queue). Logically, this is a union type, but defining another struct
- with a common layout is easier to handle in the code (same as for
- StgMutClosures).
+ with a common layout is easier to handle in the code.
Note that in the standard setup only StgTSOs can be on a blocking queue.
This is one of the main reasons for slightly different code in files
such as Schedule.c.
typedef struct StgBlockingQueueElement_ {
StgHeader header;
struct StgBlockingQueueElement_ *link; /* next elem in BQ */
- StgMutClosure *mut_link; /* next elem in mutable list */
struct StgClosure_ *payload[FLEXIBLE_ARRAY];/* contents of the closure */
} StgBlockingQueueElement;
typedef struct StgBlockingQueue_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
- StgMutClosure *mut_link; /* next elem in mutable list */
} StgBlockingQueue;
/* this closure is hanging at the end of a blocking queue in (see RBH.c) */
typedef struct StgRBH_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
- StgMutClosure *mut_link; /* next elem in mutable list */
} StgRBH;
#else
typedef struct StgBlockingQueue_ {
StgHeader header;
struct StgTSO_ *blocking_queue;
- StgMutClosure *mut_link;
} StgBlockingQueue;
#endif
typedef struct StgFetchMe_ {
StgHeader header;
globalAddr *ga; /* ptr to unique id for a closure */
- StgMutClosure *mut_link; /* next elem in mutable list */
} StgFetchMe;
/* same contents as an ordinary StgBlockingQueue */
typedef struct StgFetchMeBlockingQueue_ {
StgHeader header;
struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */
- StgMutClosure *mut_link; /* next elem in mutable list */
} StgFetchMeBlockingQueue;
/* This is an entry in a blocking queue. It indicates a fetch request from a
typedef struct StgBlockedFetch_ {
StgHeader header;
struct StgBlockingQueueElement_ *link; /* next elem in the BQ */
- StgMutClosure *mut_link; /* next elem in mutable list */
StgClosure *node; /* node to fetch */
globalAddr ga; /* where to send the result to */
} StgBlockedFetch; /* NB: not just a ptr to a GA */
/* ----------------------------------------------------------------------------
- * $Id: Constants.h,v 1.27 2004/11/18 09:56:19 tharris Exp $
+ * $Id: Constants.h,v 1.28 2005/02/10 13:02:03 simonmar Exp $
*
* (c) The GHC Team, 1998-2002
*
/* -----------------------------------------------------------------------------
Minimum closure sizes
- Here we define the minimum size for updatable closures. This must be at
- least 2, to allow for cons cells and linked indirections. All updates
+ Here we define the minimum size for updatable closures. All updates
will be performed on closures of this size. For non-updatable closures
the minimum size is 1 to allow for a forwarding pointer.
- Linked indirections are UPD_OLDGEN things: see Closures.h
+ When we used to keep the mutable list threaded through closures on
+ the heap, MIN_UPD_SIZE used to be 2. Now it's 1.
o MIN_UPD_SIZE doesn't apply to stack closures, static closures
or non-updateable objects like PAPs or CONSTRs
o EVACUATED
-------------------------------------------------------------------------- */
-#define MIN_UPD_SIZE 2
+#define MIN_UPD_SIZE 1
#define MIN_NONUPD_SIZE 1
/* -----------------------------------------------------------------------------
RTS_INFO(stg_MUT_ARR_WORDS_info);
RTS_INFO(stg_MUT_ARR_PTRS_info);
RTS_INFO(stg_MUT_ARR_PTRS_FROZEN_info);
+RTS_INFO(stg_MUT_ARR_PTRS_FROZEN0_info);
RTS_INFO(stg_MUT_VAR_info);
RTS_INFO(stg_END_TSO_QUEUE_info);
RTS_INFO(stg_MUT_CONS_info);
-RTS_INFO(stg_END_MUT_LIST_info);
RTS_INFO(stg_catch_info);
RTS_INFO(stg_PAP_info);
RTS_INFO(stg_AP_info);
RTS_ENTRY(stg_MUT_VAR_entry);
RTS_ENTRY(stg_END_TSO_QUEUE_entry);
RTS_ENTRY(stg_MUT_CONS_entry);
-RTS_ENTRY(stg_END_MUT_LIST_entry);
RTS_ENTRY(stg_catch_entry);
RTS_ENTRY(stg_PAP_entry);
RTS_ENTRY(stg_AP_entry);
/* closures */
RTS_CLOSURE(stg_END_TSO_QUEUE_closure);
-RTS_CLOSURE(stg_END_MUT_LIST_closure);
RTS_CLOSURE(stg_NO_FINALIZER_closure);
RTS_CLOSURE(stg_dummy_ret_closure);
RTS_CLOSURE(stg_forceIO_closure);
step * steps; /* steps */
unsigned int n_steps; /* number of steps */
unsigned int max_blocks; /* max blocks in step 0 */
- StgMutClosure *mut_list; /* mut objects in this gen (not G0)*/
- StgMutClosure *mut_once_list; /* objects that point to younger gens */
+ bdescr *mut_list; /* mut objects in this gen (not G0)*/
/* temporary use during GC: */
- StgMutClosure * saved_mut_list;
+ bdescr *saved_mut_list;
/* stats information */
unsigned int collections;
#define RELEASE_SM_LOCK
#endif
-/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some
- * kind of lock in the SMP case?
+/* ToDo: shouldn't recordMutable acquire some
+ * kind of lock in the SMP case? Or do we need per-processor
+ * mutable lists?
*/
INLINE_HEADER void
-recordMutable(StgMutClosure *p)
+recordMutableGen(StgClosure *p, generation *gen)
{
- bdescr *bd;
-
-#ifdef SMP
- ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p));
-#else
- ASSERT(closure_MUTABLE(p));
-#endif
-
- bd = Bdescr((P_)p);
- if (bd->gen_no > 0) {
- p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_list;
- RTS_DEREF(generations)[bd->gen_no].mut_list = p;
- }
+ bdescr *bd;
+
+ bd = gen->mut_list;
+ if (bd->free >= bd->start + BLOCK_SIZE_W) {
+ bdescr *new_bd;
+ new_bd = allocBlock();
+ new_bd->link = bd;
+ bd = new_bd;
+ gen->mut_list = bd;
+ }
+ *bd->free++ = (StgWord)p;
}
INLINE_HEADER void
-recordOldToNewPtrs(StgMutClosure *p)
+recordMutable(StgClosure *p)
{
- bdescr *bd;
-
- bd = Bdescr((P_)p);
- if (bd->gen_no > 0) {
- p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_once_list;
- RTS_DEREF(generations)[bd->gen_no].mut_once_list = p;
- }
+ bdescr *bd;
+ ASSERT(closure_MUTABLE(p));
+ bd = Bdescr((P_)p);
+ if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
}
/* -----------------------------------------------------------------------------
{ return sizeofW(StgHeader) + p + np; }
INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
-{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
+{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); }
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
-{ return sizeofW(StgHeader) + MIN_UPD_SIZE; }
+{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); }
/* --------------------------------------------------------------------------
Sizes of closures
extern void tidyAllocateLists ( void );
/* -----------------------------------------------------------------------------
- MUTABLE LISTS
- A mutable list is ended with END_MUT_LIST, so that we can use NULL
- as an indication that an object is not on a mutable list.
- ------------------------------------------------------------------------- */
-
-#define END_MUT_LIST ((StgMutClosure *)(void *)&stg_END_MUT_LIST_closure)
-
-/* -----------------------------------------------------------------------------
Functions from GC.c
-------------------------------------------------------------------------- */
/* -----------------------------------------------------------------------------
- * $Id: TSO.h,v 1.40 2005/01/28 12:55:53 simonmar Exp $
+ * $Id: TSO.h,v 1.41 2005/02/10 13:02:05 simonmar Exp $
*
* (c) The GHC Team, 1998-1999
*
StgHeader header;
struct StgTSO_* link; // Links threads onto blocking queues */
- StgMutClosure * mut_link; // TSO's are mutable of course! */
struct StgTSO_* global_link; // Links all threads together */
StgWord16 what_next; // Values defined in Constants.h
} else { \
if (info != stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
- W_ __mut_once_list; \
- __mut_once_list = generation(TO_W_(bdescr_gen_no(bd))) + \
- OFFSET_generation_mut_once_list; \
- StgMutClosure_mut_link(p1) = W_[__mut_once_list]; \
- W_[__mut_once_list] = p1; \
+ foreign "C" recordMutableGen(p1 "ptr", \
+ generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
} \
StgInd_indirectee(p1) = p2; \
SET_INFO(p1, stg_IND_OLDGEN_info); \
} else { \
if (_info != &stg_BLACKHOLE_BQ_info) { \
DEBUG_FILL_SLOP(p1); \
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
+ recordMutableGen(p1, &generations[bd->gen_no]); \
} \
- ((StgIndOldGen *)p1)->indirectee = p2; \
+ ((StgInd *)p1)->indirectee = p2; \
SET_INFO(p1, &stg_IND_OLDGEN_info); \
TICK_UPD_OLD_IND(); \
and_then; \
TICK_UPD_NEW_PERM_IND(p1);
} else {
if (info != &stg_BLACKHOLE_BQ_info) {
- ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
- generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
+ recordMutableGen(p1, &generations[bd->gen_no]);
}
- ((StgIndOldGen *)p1)->indirectee = p2;
+ ((StgInd *)p1)->indirectee = p2;
SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
// @LDV profiling
// We have just created a new closure.
struct_field(bdescr, link);
struct_size(generation);
- struct_field(generation, mut_once_list);
+ struct_field(generation, mut_list);
struct_field(CostCentreStack, ccsID);
struct_field(CostCentreStack, mem_alloc);
closure_payload(StgArrWords, payload);
closure_field(StgTSO, link);
- closure_field(StgTSO, mut_link);
closure_field(StgTSO, global_link);
closure_field(StgTSO, what_next);
closure_field(StgTSO, why_blocked);
closure_payload(StgAP_STACK, payload);
closure_field(StgInd, indirectee);
- closure_field(StgMutClosure, mut_link);
closure_size(StgMutVar);
closure_field(StgMutVar, var);
if (n != 0) {
head->blocks = n;
- head->free = head->start;
+ head->free = head->start;
+ head->link = NULL;
for (i=1, bd = head+1; i < n; i++, bd++) {
bd->free = 0;
bd->blocks = 0;
*last = bd->link;
/* no initialisation necessary - this is already a
* self-contained block group. */
-#ifdef DEBUG
bd->free = bd->start; /* block isn't free now */
-#endif
+ bd->link = NULL;
return bd;
}
if (bd->blocks > n) { /* block too big... */
return;
}
-#ifdef DEBUG
+
p->free = (void *)-1; /* indicates that this block is free */
p->step = NULL;
p->gen_no = 0;
/* fill the block group with garbage if sanity checking is on */
IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE));
-#endif
/* find correct place in free list to place new group */
last = NULL;
#ifndef BLOCK_ALLOC_H
#define BLOCK_ALLOC_H
-/* Initialisation ---------------------------------------------------------- */
-
-extern void initBlockAllocator(void);
-
-/* Allocation -------------------------------------------------------------- */
-
-extern bdescr *allocGroup(nat n);
-extern bdescr *allocBlock(void);
-
-/* De-Allocation ----------------------------------------------------------- */
-
-extern void freeGroup(bdescr *p);
-extern void freeChain(bdescr *p);
-
-/* Round a value to megablocks --------------------------------------------- */
-
-#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W)
-
-INLINE_HEADER nat
-round_to_mblocks(nat words)
-{
- if (words > WORDS_PER_MBLOCK) {
- if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) {
- words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK;
- } else {
- words = ((words / WORDS_PER_MBLOCK) + 1) * WORDS_PER_MBLOCK;
- }
- }
- return words;
-}
-
/* Debugging -------------------------------------------------------------- */
#ifdef DEBUG
REGPARM1 static StgClosure * evacuate (StgClosure *q);
static void zero_static_object_list ( StgClosure* first_static );
-static void zero_mutable_list ( StgMutClosure *first );
static rtsBool traverse_weak_ptr_list ( void );
static void mark_weak_ptr_list ( StgWeak **list );
static void scavenge_large ( step * );
static void scavenge_static ( void );
static void scavenge_mutable_list ( generation *g );
-static void scavenge_mut_once_list ( generation *g );
static void scavenge_large_bitmap ( StgPtr p,
StgLargeBitmap *large_bitmap,
(and all younger generations):
- follow all pointers in the root set. the root set includes all
- mutable objects in all generations (mutable_list and mut_once_list).
+ mutable objects in all generations (mutable_list).
- for each pointer, evacuate the object it points to into either
When we evacuate an object we attempt to evacuate
everything it points to into the same generation - this is
achieved by setting evac_gen to the desired generation. If
- we can't do this, then an entry in the mut_once list has to
+ we can't do this, then an entry in the mut list has to
be made for the cross-generation pointer.
+ if the object is already in a generation > N, then leave
static_objects = END_OF_STATIC_LIST;
scavenged_static_objects = END_OF_STATIC_LIST;
- /* zero the mutable list for the oldest generation (see comment by
- * zero_mutable_list below).
- */
- if (major_gc) {
- zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
- }
-
/* Save the old to-space if we're doing a two-space collection
*/
if (RtsFlags.GcFlags.generations == 1) {
// collecting.
//
for (g = 0; g <= N; g++) {
- generations[g].mut_once_list = END_MUT_LIST;
- generations[g].mut_list = END_MUT_LIST;
+
+ // throw away the mutable list. Invariant: the mutable list
+ // always has at least one block; this means we can avoid a check for
+ // NULL in recordMutable().
+ if (g != 0) {
+ freeChain(generations[g].mut_list);
+ generations[g].mut_list = allocBlock();
+ }
for (s = 0; s < generations[g].n_steps; s++) {
int st;
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
generations[g].saved_mut_list = generations[g].mut_list;
- generations[g].mut_list = END_MUT_LIST;
+ generations[g].mut_list = allocBlock();
+ // mut_list always has at least one block.
}
- // Do the mut-once lists first
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
- IF_PAR_DEBUG(verbose,
- printMutOnceList(&generations[g]));
- scavenge_mut_once_list(&generations[g]);
- evac_gen = g;
- for (st = generations[g].n_steps-1; st >= 0; st--) {
- scavenge(&generations[g].steps[st]);
- }
- }
-
- for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
- IF_PAR_DEBUG(verbose,
- printMutableList(&generations[g]));
+ IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
scavenge_mutable_list(&generations[g]);
evac_gen = g;
for (st = generations[g].n_steps-1; st >= 0; st--) {
generations[g].collections++; // for stats
}
+ // Count the mutable list as bytes "copied" for the purposes of
+ // stats. Every mutable list is copied during every GC.
+ if (g > 0) {
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ copied += (bd->free - bd->start) * sizeof(StgWord);
+ }
+ }
+
for (s = 0; s < generations[g].n_steps; s++) {
bdescr *next;
stp = &generations[g].steps[s];
}
/* -----------------------------------------------------------------------------
- Adding a MUT_CONS to an older generation.
-
- This is necessary from time to time when we end up with an
- old-to-new generation pointer in a non-mutable object. We defer
- the promotion until the next GC.
- -------------------------------------------------------------------------- */
-
-static StgClosure *
-mkMutCons(StgClosure *ptr, generation *gen)
-{
- StgMutVar *q;
- step *stp;
-
- stp = &gen->steps[0];
-
- /* chain a new block onto the to-space for the destination step if
- * necessary.
- */
- if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
- gc_alloc_block(stp);
- }
-
- q = (StgMutVar *)stp->hp;
- stp->hp += sizeofW(StgMutVar);
-
- SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
- q->var = ptr;
- recordOldToNewPtrs((StgMutClosure *)q);
-
- return (StgClosure *)q;
-}
-
-/* -----------------------------------------------------------------------------
Evacuate
This is called (eventually) for every live object in the system.
case FUN_1_0:
case FUN_0_1:
case CONSTR_1_0:
+ case THUNK_1_0:
+ case THUNK_0_1:
return copy(q,sizeofW(StgHeader)+1,stp);
- case THUNK_1_0: // here because of MIN_UPD_SIZE
- case THUNK_0_1:
case THUNK_1_1:
case THUNK_0_2:
case THUNK_2_0:
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
// just copy the block
return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
switch (info->type) {
case MVAR:
- /* treat MVars specially, because we don't want to evacuate the
- * mut_link field in the middle of the closure.
- */
{
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)mvar);
- failed_to_evac = rtsFalse; // mutable.
+ failed_to_evac = rtsTrue; // mutable.
p += sizeofW(StgMVar);
break;
}
case THUNK_1_0:
scavenge_thunk_srt(info);
((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ p += sizeofW(StgHeader) + 1;
break;
case FUN_1_0:
case THUNK_0_1:
scavenge_thunk_srt(info);
- p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ p += sizeofW(StgHeader) + 1;
break;
case FUN_0_1:
}
// fall through
case IND_OLDGEN_PERM:
- ((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordOldToNewPtrs((StgMutClosure *)p);
- }
- p += sizeofW(StgIndOldGen);
+ ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
+ p += sizeofW(StgInd);
break;
case MUT_VAR:
evac_gen = 0;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)p);
- failed_to_evac = rtsFalse; // mutable anyhow
- p += sizeofW(StgMutVar);
- break;
-
- case MUT_CONS:
- // ignore these
- failed_to_evac = rtsFalse; // mutable anyhow
+ failed_to_evac = rtsTrue; // mutable anyhow
p += sizeofW(StgMutVar);
break;
StgBlockingQueue *bh = (StgBlockingQueue *)p;
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- recordMutable((StgMutClosure *)bh);
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
p += BLACKHOLE_sizeW();
break;
}
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)q);
- failed_to_evac = rtsFalse; // mutable anyhow.
+ failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
StgPtr next;
- // Set the mut_link field to NULL, so that we will put this
- // array back on the mutable list if it is subsequently thawed
- // by unsafeThaw#.
- ((StgMutArrPtrs*)p)->mut_link = NULL;
-
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
evac_gen = 0;
scavengeTSO(tso);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)tso);
- failed_to_evac = rtsFalse; // mutable anyhow.
+ failed_to_evac = rtsTrue; // mutable anyhow.
p += tso_sizeW(tso);
break;
}
StgRBH *rbh = (StgRBH *)p;
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
- recordMutable((StgMutClosure *)to);
- failed_to_evac = rtsFalse; // mutable anyhow.
+ failed_to_evac = rtsTrue; // mutable anyhow.
IF_DEBUG(gc,
debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)bf);
- }
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)fmbq);
- }
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)wq);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVarWaitQueue);
break;
}
tvar->current_value = evacuate((StgClosure*)tvar->current_value);
tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)tvar);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVar);
break;
}
trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)trec);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecHeader);
break;
}
e->new_value = evacuate((StgClosure*)e->new_value);
}
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)tc);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
}
info->type, p);
}
- /* If we didn't manage to promote all the objects pointed to by
- * the current object, then we have to designate this object as
- * mutable (because it contains old-to-new generation pointers).
+ /*
+ * We need to record the current object on the mutable list if
+ * (a) It is actually mutable, or
+ * (b) It contains pointers to a younger generation.
+ * Case (b) arises if we didn't manage to promote everything that
+ * the current object points to into the current generation.
*/
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ recordMutableGen((StgClosure *)q, stp->gen);
}
}
switch (info->type) {
case MVAR:
- /* treat MVars specially, because we don't want to evacuate the
- * mut_link field in the middle of the closure.
- */
{
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
mvar->value = evacuate((StgClosure *)mvar->value);
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse; // mutable.
+ failed_to_evac = rtsTrue; // mutable.
break;
}
case IND_OLDGEN:
case IND_OLDGEN_PERM:
- ((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
- if (failed_to_evac) {
- recordOldToNewPtrs((StgMutClosure *)p);
- }
- failed_to_evac = rtsFalse;
+ ((StgInd *)p)->indirectee =
+ evacuate(((StgInd *)p)->indirectee);
break;
case MUT_VAR:
evac_gen = 0;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
- break;
-
- case MUT_CONS:
- // ignore these
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
break;
case CAF_BLACKHOLE:
StgBlockingQueue *bh = (StgBlockingQueue *)p;
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
break;
}
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse; // mutable anyhow.
+ failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
StgPtr next;
- // Set the mut_link field to NULL, so that we will put this
- // array on the mutable list if it is subsequently thawed
- // by unsafeThaw#.
- ((StgMutArrPtrs*)p)->mut_link = NULL;
-
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
evac_gen = 0;
scavengeTSO(tso);
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
break;
}
StgRBH *rbh = (StgRBH *)p;
bh->blocking_queue =
(StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- recordMutable((StgMutClosure *)rbh);
- failed_to_evac = rtsFalse; // mutable anyhow.
+ failed_to_evac = rtsTrue; // mutable anyhow.
IF_DEBUG(gc,
debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)bf);
- }
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)fmbq);
- }
IF_DEBUG(gc,
debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));
wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)wq);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
break;
}
tvar->current_value = evacuate((StgClosure*)tvar->current_value);
tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)tvar);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
break;
}
e->new_value = evacuate((StgClosure*)e->new_value);
}
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)tc);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
break;
}
trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
evac_gen = saved_evac_gen;
- recordMutable((StgMutClosure *)trec);
- failed_to_evac = rtsFalse; // mutable
+ failed_to_evac = rtsTrue; // mutable
break;
}
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ recordMutableGen((StgClosure *)q, &generations[evac_gen]);
}
// mark the next bit to indicate "scavenged"
switch (info->type) {
+ case MVAR:
+ {
+ StgMVar *mvar = ((StgMVar *)p);
+ evac_gen = 0;
+ mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
+ mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
+ mvar->value = evacuate((StgClosure *)mvar->value);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable.
+ break;
+ }
+
case FUN:
case FUN_1_0: // hardly worth specialising these guys
case FUN_0_1:
case WEAK:
case FOREIGN:
case IND_PERM:
- case IND_OLDGEN_PERM:
{
StgPtr q, end;
break;
}
+ case MUT_VAR:
+ evac_gen = 0;
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable anyhow
+ break;
+
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
break;
+ case BLACKHOLE_BQ:
+ {
+ StgBlockingQueue *bh = (StgBlockingQueue *)p;
+ evac_gen = 0; // repeatedly mutable
+ bh->blocking_queue =
+ (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
+ failed_to_evac = rtsTrue;
+ break;
+ }
+
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
break;
}
+ case AP_STACK:
+ {
+ StgAP_STACK *ap = (StgAP_STACK *)p;
+
+ ap->fun = evacuate(ap->fun);
+ scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
+ p = (StgPtr)ap->payload + ap->size;
+ break;
+ }
+
+ case PAP:
+ case AP:
+ p = scavenge_PAP((StgPAP *)p);
+ break;
+
case ARR_WORDS:
// nothing to follow
break;
StgPtr next;
evac_gen = 0; // repeatedly mutable
- recordMutable((StgMutClosure *)p);
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
}
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
break;
}
case MUT_ARR_PTRS_FROZEN:
+ case MUT_ARR_PTRS_FROZEN0:
{
// follow everything
StgPtr next;
- // Set the mut_link field to NULL, so that we will put this
- // array on the mutable list if it is subsequently thawed
- // by unsafeThaw#.
- ((StgMutArrPtrs*)p)->mut_link = NULL;
-
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
evac_gen = 0; // repeatedly mutable
scavengeTSO(tso);
- recordMutable((StgMutClosure *)tso);
evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsTrue;
break;
}
- case AP_STACK:
- {
- StgAP_STACK *ap = (StgAP_STACK *)p;
-
- ap->fun = evacuate(ap->fun);
- scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
- p = (StgPtr)ap->payload + ap->size;
+#if defined(PAR)
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+#if 0
+ nat size, ptrs, nonptrs, vhs;
+ char str[80];
+ StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+#endif
+ StgRBH *rbh = (StgRBH *)p;
+ (StgClosure *)rbh->blocking_queue =
+ evacuate((StgClosure *)rbh->blocking_queue);
+ failed_to_evac = rtsTrue; // mutable anyhow.
+ IF_DEBUG(gc,
+ debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ // ToDo: use size of reverted closure here!
break;
}
- case PAP:
- case AP:
- p = scavenge_PAP((StgPAP *)p);
- break;
-
- case IND_OLDGEN:
- // This might happen if for instance a MUT_CONS was pointing to a
- // THUNK which has since been updated. The IND_OLDGEN will
- // be on the mutable list anyway, so we don't need to do anything
- // here.
+ case BLOCKED_FETCH:
+ {
+ StgBlockedFetch *bf = (StgBlockedFetch *)p;
+ // follow the pointer to the node which is being demanded
+ (StgClosure *)bf->node =
+ evacuate((StgClosure *)bf->node);
+ // follow the link to the rest of the blocking queue
+ (StgClosure *)bf->link =
+ evacuate((StgClosure *)bf->link);
+ IF_DEBUG(gc,
+ debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node)));
break;
+ }
- default:
- barf("scavenge_one: strange object %d", (int)(info->type));
- }
-
- no_luck = failed_to_evac;
- failed_to_evac = rtsFalse;
- return (no_luck);
-}
-
-/* -----------------------------------------------------------------------------
- Scavenging mutable lists.
+#ifdef DIST
+ case REMOTE_REF:
+#endif
+ case FETCH_ME:
+ break; // nothing to do in this case
- We treat the mutable list of each generation > N (i.e. all the
- generations older than the one being collected) as roots. We also
- remove non-mutable objects from the mutable list at this point.
- -------------------------------------------------------------------------- */
+ case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ {
+ StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
+ (StgClosure *)fmbq->blocking_queue =
+ evacuate((StgClosure *)fmbq->blocking_queue);
+ IF_DEBUG(gc,
+ debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p)));
+ break;
+ }
+#endif
-static void
-scavenge_mut_once_list(generation *gen)
-{
- const StgInfoTable *info;
- StgMutClosure *p, *next, *new_list;
+ case TVAR_WAIT_QUEUE:
+ {
+ StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
+ evac_gen = 0;
+ wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
+ wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
+ wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable
+ break;
+ }
- p = gen->mut_once_list;
- new_list = END_MUT_LIST;
- next = p->mut_link;
+ case TVAR:
+ {
+ StgTVar *tvar = ((StgTVar *) p);
+ evac_gen = 0;
+ tvar->current_value = evacuate((StgClosure*)tvar->current_value);
+ tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable
+ break;
+ }
- evac_gen = gen->no;
- failed_to_evac = rtsFalse;
+ case TREC_HEADER:
+ {
+ StgTRecHeader *trec = ((StgTRecHeader *) p);
+ evac_gen = 0;
+ trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
+ trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable
+ break;
+ }
- for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
+ case TREC_CHUNK:
+ {
+ StgWord i;
+ StgTRecChunk *tc = ((StgTRecChunk *) p);
+ TRecEntry *e = &(tc -> entries[0]);
+ evac_gen = 0;
+ tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
+ e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
+ e->expected_value = evacuate((StgClosure*)e->expected_value);
+ e->new_value = evacuate((StgClosure*)e->new_value);
+ }
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsTrue; // mutable
+ break;
+ }
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
- info = get_itbl(p);
- /*
- if (info->type==RBH)
- info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
- */
- switch(info->type) {
-
case IND_OLDGEN:
case IND_OLDGEN_PERM:
case IND_STATIC:
/* Try to pull the indirectee into this generation, so we can
* remove the indirection from the mutable list.
*/
- ((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
+ ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
#endif
-
- /* failed_to_evac might happen if we've got more than two
- * generations, we're collecting only generation 0, the
- * indirection resides in generation 2 and the indirectee is
- * in generation 1.
- */
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- p->mut_link = new_list;
- new_list = p;
- } else {
- /* the mut_link field of an IND_STATIC is overloaded as the
- * static link field too (it just so happens that we don't need
- * both at the same time), so we need to NULL it out when
- * removing this object from the mutable list because the static
- * link fields are all assumed to be NULL before doing a major
- * collection.
- */
- p->mut_link = NULL;
- }
- continue;
-
- case MUT_CONS:
- /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
- * it from the mutable list if possible by promoting whatever it
- * points to.
- */
- if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
- /* didn't manage to promote everything, so put the
- * MUT_CONS back on the list.
- */
- p->mut_link = new_list;
- new_list = p;
- }
- continue;
+ break;
default:
- // shouldn't have anything else on the mutables list
- barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
- }
- }
+ barf("scavenge_one: strange object %d", (int)(info->type));
+ }
- gen->mut_once_list = new_list;
+ no_luck = failed_to_evac;
+ failed_to_evac = rtsFalse;
+ return (no_luck);
}
+/* -----------------------------------------------------------------------------
+ Scavenging mutable lists.
+
+ We treat the mutable list of each generation > N (i.e. all the
+ generations older than the one being collected) as roots. We also
+ remove non-mutable objects from the mutable list at this point.
+ -------------------------------------------------------------------------- */
static void
scavenge_mutable_list(generation *gen)
{
- const StgInfoTable *info;
- StgMutClosure *p, *next;
-
- p = gen->saved_mut_list;
- next = p->mut_link;
-
- evac_gen = 0;
- failed_to_evac = rtsFalse;
-
- for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
-
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
- info = get_itbl(p);
- /*
- if (info->type==RBH)
- info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
- */
- switch(info->type) {
-
- case MUT_ARR_PTRS:
- // follow everything
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- {
- StgPtr end, q;
-
- end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
- }
- continue;
- }
-
- // Happens if a MUT_ARR_PTRS in the old generation is frozen
- case MUT_ARR_PTRS_FROZEN:
- {
- StgPtr end, q;
-
- evac_gen = gen->no;
- end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
- }
- evac_gen = 0;
- // Set the mut_link field to NULL, so that we will put this
- // array back on the mutable list if it is subsequently thawed
- // by unsafeThaw#.
- p->mut_link = NULL;
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- mkMutCons((StgClosure *)p, gen);
- }
- continue;
- }
-
- case MUT_VAR:
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
-
- case MVAR:
- {
- StgMVar *mvar = (StgMVar *)p;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
-
- case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
-
- scavengeTSO(tso);
-
- /* Don't take this TSO off the mutable list - it might still
- * point to some younger objects (because we set evac_gen to 0
- * above).
- */
- tso->mut_link = gen->mut_list;
- gen->mut_list = (StgMutClosure *)tso;
- continue;
- }
-
- case BLACKHOLE_BQ:
- {
- StgBlockingQueue *bh = (StgBlockingQueue *)p;
- bh->blocking_queue =
- (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
-
- /* Happens if a BLACKHOLE_BQ in the old generation is updated:
- */
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- /* Try to pull the indirectee into this generation, so we can
- * remove the indirection from the mutable list.
- */
- evac_gen = gen->no;
- ((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
- evac_gen = 0;
-
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- p->mut_link = gen->mut_once_list;
- gen->mut_once_list = p;
- } else {
- p->mut_link = NULL;
- }
- continue;
-
-#if defined(PAR)
- // HWL: check whether all of these are necessary
-
- case RBH: // cf. BLACKHOLE_BQ
- {
- // nat size, ptrs, nonptrs, vhs;
- // char str[80];
- // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
- StgRBH *rbh = (StgRBH *)p;
- (StgClosure *)rbh->blocking_queue =
- evacuate((StgClosure *)rbh->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)rbh);
- }
- // ToDo: use size of reverted closure here!
- p += BLACKHOLE_sizeW();
- break;
- }
-
- case BLOCKED_FETCH:
- {
- StgBlockedFetch *bf = (StgBlockedFetch *)p;
- // follow the pointer to the node which is being demanded
- (StgClosure *)bf->node =
- evacuate((StgClosure *)bf->node);
- // follow the link to the rest of the blocking queue
- (StgClosure *)bf->link =
- evacuate((StgClosure *)bf->link);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)bf);
- }
- p += sizeofW(StgBlockedFetch);
- break;
- }
-
-#ifdef DIST
- case REMOTE_REF:
- barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
-#endif
- case FETCH_ME:
- p += sizeofW(StgFetchMe);
- break; // nothing to do in this case
-
- case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
- {
- StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
- (StgClosure *)fmbq->blocking_queue =
- evacuate((StgClosure *)fmbq->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)fmbq);
- }
- p += sizeofW(StgFetchMeBlockingQueue);
- break;
- }
-#endif
-
- case TVAR_WAIT_QUEUE:
- {
- StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
- wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
- wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
+ bdescr *bd;
+ StgPtr p, q;
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
+ bd = gen->saved_mut_list;
- case TREC_CHUNK:
- {
- StgWord i;
- StgTRecChunk *tc = ((StgTRecChunk *) p);
- TRecEntry *e = &(tc -> entries[0]);
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
- for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evac_gen = gen->no;
+ for (; bd != NULL; bd = bd->link) {
+ for (q = bd->start; q < bd->free; q++) {
+ p = (StgPtr)*q;
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
+ if (scavenge_one(p)) {
+ /* didn't manage to promote everything, so put the
+ * object back on the list.
+ */
+ recordMutableGen((StgClosure *)p,gen);
+ }
}
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
-
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
- }
-
- default:
- // shouldn't have anything else on the mutables list
- barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
}
- }
+
+ // free the old mut_list
+ freeChain(gen->saved_mut_list);
+ gen->saved_mut_list = NULL;
}
ind->indirectee = evacuate(ind->indirectee);
/* might fail to evacuate it, in which case we have to pop it
- * back on the mutable list (and take it off the
- * scavenged_static list because the static link and mut link
- * pointers are one and the same).
+ * back on the mutable list of the oldest generation. We
+ * leave it *on* the scavenged_static_objects list, though,
+ * in case we visit this object again.
*/
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- scavenged_static_objects = IND_STATIC_LINK(p);
- ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
- oldest_gen->mut_once_list = (StgMutClosure *)ind;
+ recordMutableGen((StgClosure *)p,oldest_gen);
}
break;
}
p = bd->start;
if (scavenge_one(p)) {
- mkMutCons((StgClosure *)p, stp->gen);
+ recordMutableGen((StgClosure *)p, stp->gen);
}
}
}
}
}
-/* This function is only needed because we share the mutable link
- * field with the static link field in an IND_STATIC, so we have to
- * zero the mut_link field before doing a major GC, which needs the
- * static link field.
- *
- * It doesn't do any harm to zero all the mutable link fields on the
- * mutable list.
- */
-
-static void
-zero_mutable_list( StgMutClosure *first )
-{
- StgMutClosure *next, *c;
-
- for (c = first; c != END_MUT_LIST; c = next) {
- next = c->mut_link;
- c->mut_link = NULL;
- }
-}
-
/* -----------------------------------------------------------------------------
Reverting CAFs
-------------------------------------------------------------------------- */
#if DEBUG
void
-printMutOnceList(generation *gen)
-{
- StgMutClosure *p, *next;
-
- p = gen->mut_once_list;
- next = p->mut_link;
-
- debugBelch("@@ Mut once list %p: ", gen->mut_once_list);
- for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- debugBelch("%p (%s), ",
- p, info_type((StgClosure *)p));
- }
- debugBelch("\n");
-}
-
-void
printMutableList(generation *gen)
{
- StgMutClosure *p, *next;
+ bdescr *bd;
+ StgPtr p;
- p = gen->mut_list;
- next = p->mut_link;
+ debugBelch("@@ Mutable list %p: ", gen->mut_list);
- debugBelch("@@ Mutable list %p: ", gen->mut_list);
- for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- debugBelch("%p (%s), ",
- p, info_type((StgClosure *)p));
- }
- debugBelch("\n");
+ for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
+ for (p = bd->start; p < bd->free; p++) {
+ debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
+ }
+ }
+ debugBelch("\n");
}
STATIC_INLINE rtsBool
see scavenge_large */
return (info->type == MUT_ARR_PTRS ||
info->type == MUT_ARR_PTRS_FROZEN ||
+ info->type == MUT_ARR_PTRS_FROZEN0 ||
info->type == TSO ||
info->type == ARR_WORDS);
}
case STABLE_NAME:
case IND_PERM:
case MUT_VAR:
- case MUT_CONS:
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
- thread((StgPtr)&((StgIndOldGen *)p)->indirectee);
- return p + sizeofW(StgIndOldGen);
+ thread((StgPtr)&((StgInd *)p)->indirectee);
+ return p + sizeofW(StgInd);
case THUNK_SELECTOR:
{
// Rebuild the mutable list for the old generation.
if (ip_MUTABLE(info)) {
- recordMutable((StgMutClosure *)free);
+ recordMutable((StgClosure *)free);
}
// relocate TSOs
return free_blocks;
}
-static void
-thread_mut_once_list( generation *g )
-{
- StgMutClosure *p, *next;
-
- for (p = g->mut_once_list; p != END_MUT_LIST; p = next) {
- next = p->mut_link;
- thread((StgPtr)&p->mut_link);
- }
-
- thread((StgPtr)&g->mut_once_list);
-}
-
void
compact( void (*get_roots)(evac_fn) )
{
// mutable lists
for (g = 1; g < RtsFlags.GcFlags.generations; g++) {
- thread((StgPtr)&generations[g].mut_list);
- thread_mut_once_list(&generations[g]);
+ bdescr *bd;
+ StgPtr p;
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ for (p = bd->start; p < bd->free; p++) {
+ thread(p);
+ }
+ }
}
// the global thread list
case WEAK:
case MUT_VAR:
- case MUT_CONS:
case FOREIGN:
case BCO:
case STABLE_NAME:
break;
case IND_PERM:
- size = sizeofW(StgInd);
- break;
-
case IND_OLDGEN_PERM:
- size = sizeofW(StgIndOldGen);
+ size = sizeofW(StgInd);
break;
/*
// because they will perish before the next census at any
// rate.
case IND:
- size = sizeofW(StgInd);
- return size;
-
case IND_OLDGEN:
- size = sizeofW(StgIndOldGen);
+ size = sizeofW(StgInd);
return size;
case EVACUATED:
SymX(stg_IND_STATIC_info) \
SymX(stg_INTLIKE_closure) \
SymX(stg_MUT_ARR_PTRS_FROZEN_info) \
+ SymX(stg_MUT_ARR_PTRS_FROZEN0_info) \
SymX(stg_WEAK_info) \
SymX(stg_ap_0_info) \
SymX(stg_ap_v_info) \
unsafeThawArrayzh_fast
{
- SET_INFO(R1,stg_MUT_ARR_PTRS_info);
-
// SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST
//
// A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN
// it on the mutable list for the GC to remove (removing something from
// the mutable list is not easy, because the mut_list is only singly-linked).
//
+ // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list,
+ // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0 to indicate
+ // that it is still on the mutable list.
+
-   // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
-   // either it is on a mut_list, or it isn't.  We adopt the convention that
-   // the mut_link field is NULL if it isn't on a mut_list, and the GC
-   // maintains this invariant.
+   // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases:
+   // either it is on a mut_list, or it isn't.  We can tell which by testing
+   // the info pointer: MUT_ARR_PTRS_FROZEN0 means the array is still on a
+   // mut_list, MUT_ARR_PTRS_FROZEN means it is not.
//
- if (StgMutClosure_mut_link(R1) == NULL) {
+ if (%INFO_TYPE(%GET_STD_INFO(R1)) != HALF_W_(MUT_ARR_PTRS_FROZEN0)) {
foreign "C" recordMutable(R1 "ptr");
}
+ SET_INFO(R1,stg_MUT_ARR_PTRS_info);
+
RET_P(R1);
}
case MVAR:
{
StgMVar* mv = (StgMVar*)obj;
- debugBelch("MVAR(head=%p, link=%p, tail=%p, value=%p)\n", mv->head, mv->mut_link, mv->tail, mv->value);
+ debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value);
break;
}
case MUT_VAR:
{
StgMutVar* mv = (StgMutVar*)obj;
- debugBelch("MUT_VAR(var=%p, link=%p)\n", mv->var, mv->mut_link);
+ debugBelch("MUT_VAR(var=%p)\n", mv->var);
break;
}
case FOREIGN:
case STABLE_NAME:
case MUT_VAR:
- case MUT_CONS:
prim = rtsTrue;
size = sizeW_fromITBL(info);
break;
// one child (fixed), no SRT
case MUT_VAR:
- case MUT_CONS:
*first_child = ((StgMutVar *)c)->var;
return;
case BLACKHOLE_BQ:
case IND_PERM:
case IND_OLDGEN_PERM:
case IND_OLDGEN:
- *first_child = ((StgIndOldGen *)c)->indirectee;
+ *first_child = ((StgInd *)c)->indirectee;
return;
case CONSTR_1_0:
case CONSTR_1_1:
case ARR_WORDS:
// one child (fixed), no SRT
case MUT_VAR:
- case MUT_CONS:
case BLACKHOLE_BQ:
case THUNK_SELECTOR:
case IND_PERM:
// mutable objects
case MVAR:
case MUT_VAR:
- case MUT_CONS:
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
StgWeak *weak;
RetainerSet *rtl;
nat g;
- StgMutClosure *ml;
+ StgPtr ml;
+ bdescr *bd;
#ifdef DEBUG_RETAINER
RetainerSet tmpRetainerSet;
#endif
// object (computing sumOfNewCostExtra and updating costArray[] when
// debugging retainer profiler).
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- ASSERT(g != 0 ||
- (generations[g].mut_list == END_MUT_LIST &&
- generations[g].mut_once_list == END_MUT_LIST));
-
- // Todo:
- // I think traversing through mut_list is unnecessary.
- // Think about removing this part.
- for (ml = generations[g].mut_list; ml != END_MUT_LIST;
- ml = ml->mut_link) {
+	ASSERT(g != 0 || (generations[g].mut_list == NULL));
- maybeInitRetainerSet((StgClosure *)ml);
- rtl = retainerSetOf((StgClosure *)ml);
-
-#ifdef DEBUG_RETAINER
- if (rtl == NULL) {
- // first visit to *ml
- // This is a violation of the interface rule!
- RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip);
-
- switch (get_itbl((StgClosure *)ml)->type) {
- case IND_STATIC:
- // no cost involved
- break;
- case CONSTR_INTLIKE:
- case CONSTR_CHARLIKE:
- case CONSTR_NOCAF_STATIC:
- case CONSTR_STATIC:
- case THUNK_STATIC:
- case FUN_STATIC:
- barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type);
- break;
- default:
- // dynamic objects
- costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml);
- sumOfNewCostExtra += cost((StgClosure *)ml);
- break;
- }
- }
-#endif
- }
-
- // Traversing through mut_once_list is, in contrast, necessary
+ // Traversing through mut_list is necessary
// because we can find MUT_VAR objects which have not been
// visited during retainer profiling.
- for (ml = generations[g].mut_once_list; ml != END_MUT_LIST;
- ml = ml->mut_link) {
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ for (ml = bd->start; ml < bd->free; ml++) {
+
+ maybeInitRetainerSet((StgClosure *)ml);
+ rtl = retainerSetOf((StgClosure *)ml);
- maybeInitRetainerSet((StgClosure *)ml);
- rtl = retainerSetOf((StgClosure *)ml);
#ifdef DEBUG_RETAINER
- if (rtl == NULL) {
- // first visit to *ml
- // This is a violation of the interface rule!
- RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip);
-
- switch (get_itbl((StgClosure *)ml)->type) {
- case IND_STATIC:
- // no cost involved
- break;
- case CONSTR_INTLIKE:
- case CONSTR_CHARLIKE:
- case CONSTR_NOCAF_STATIC:
- case CONSTR_STATIC:
- case THUNK_STATIC:
- case FUN_STATIC:
- barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type);
- break;
- default:
- // dynamic objects
- costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml);
- sumOfNewCostExtra += cost((StgClosure *)ml);
- break;
+ if (rtl == NULL) {
+ // first visit to *ml
+ // This is a violation of the interface rule!
+ RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip);
+
+ switch (get_itbl((StgClosure *)ml)->type) {
+ case IND_STATIC:
+ // no cost involved
+ break;
+ case CONSTR_INTLIKE:
+ case CONSTR_CHARLIKE:
+ case CONSTR_NOCAF_STATIC:
+ case CONSTR_STATIC:
+ case THUNK_STATIC:
+ case FUN_STATIC:
+ barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type);
+ break;
+ default:
+ // dynamic objects
+ costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml);
+ sumOfNewCostExtra += cost((StgClosure *)ml);
+ break;
+ }
}
- }
#endif
+ }
}
}
}
case FUN_0_2:
case WEAK:
case MUT_VAR:
- case MUT_CONS:
case CAF_BLACKHOLE:
case BLACKHOLE:
case SE_BLACKHOLE:
case FOREIGN:
case STABLE_NAME:
case MUT_VAR:
- case MUT_CONS:
case CONSTR_INTLIKE:
case CONSTR_CHARLIKE:
case CONSTR_STATIC:
case THUNK_SELECTOR:
ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee));
- return sizeofW(StgHeader) + MIN_UPD_SIZE;
+ return THUNK_SELECTOR_sizeW();
case IND:
{
-------------------------------------------------------------------------- */
void
-checkMutableList( StgMutClosure *p, nat gen )
+checkMutableList( bdescr *mut_bd, nat gen )
{
bdescr *bd;
+ StgPtr q;
+ StgClosure *p;
- for (; p != END_MUT_LIST; p = p->mut_link) {
- bd = Bdescr((P_)p);
- ASSERT(closure_MUTABLE(p));
- ASSERT(bd->gen_no == gen);
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
- }
-}
-
-void
-checkMutOnceList( StgMutClosure *p, nat gen )
-{
- bdescr *bd;
- StgInfoTable *info;
-
- for (; p != END_MUT_LIST; p = p->mut_link) {
- bd = Bdescr((P_)p);
- info = get_itbl(p);
-
- ASSERT(!closure_MUTABLE(p));
- ASSERT(ip_STATIC(info) || bd->gen_no == gen);
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link));
-
- switch (info->type) {
- case IND_STATIC:
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
- case MUT_CONS:
- break;
- default:
- barf("checkMutOnceList: strange closure %p (%s)",
- p, info_type((StgClosure *)p));
+ for (bd = mut_bd; bd != NULL; bd = bd->link) {
+ for (q = bd->start; q < bd->free; q++) {
+ p = (StgClosure *)*q;
+ ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen);
}
}
}
extern StgOffset checkStackFrame ( StgPtr sp );
extern StgOffset checkClosure ( StgClosure* p );
-extern void checkMutableList ( StgMutClosure *p, nat gen );
-extern void checkMutOnceList ( StgMutClosure *p, nat gen );
+extern void checkMutableList ( bdescr *bd, nat gen );
#if defined(GRAN)
extern void checkTSOsSanity(void);
#endif
ready_to_gc = rtsTrue;
- context_switch = 1; /* stop other threads ASAP */
PUSH_ON_RUN_QUEUE(t);
/* actual GC is done at the end of the while loop */
break;
tso->link = dest;
tso->sp = (P_)&(tso->stack[tso->stack_size]);
tso->why_blocked = NotBlocked;
- dest->mut_link = NULL;
IF_PAR_DEBUG(verbose,
debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
void
statDescribeGens(void)
{
- nat g, s, mut, mut_once, lge, live;
- StgMutClosure *m;
+ nat g, s, mut, lge, live;
bdescr *bd;
step *step;
- debugBelch(" Gen Steps Max Mutable Mut-Once Step Blocks Live Large\n Blocks Closures Closures Objects\n");
+ debugBelch(" Gen Steps Max Mutable Step Blocks Live Large\n Blocks Closures Closures Objects\n");
+ mut = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (m = generations[g].mut_list, mut = 0; m != END_MUT_LIST;
- m = m->mut_link)
- mut++;
- for (m = generations[g].mut_once_list, mut_once = 0; m != END_MUT_LIST;
- m = m->mut_link)
- mut_once++;
- debugBelch("%8d %8d %8d %9d %9d", g, generations[g].n_steps,
- generations[g].max_blocks, mut, mut_once);
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ mut += bd->free - bd->start;
+ }
+
+ debugBelch("%8d %8d %8d %9d", g, generations[g].n_steps,
+ generations[g].max_blocks, mut);
for (s = 0; s < generations[g].n_steps; s++) {
step = &generations[g].steps[s];
jump %GET_ENTRY(R1);
}
-INFO_TABLE(stg_IND_PERM,1,1,IND_PERM,"IND_PERM","IND_PERM")
+INFO_TABLE(stg_IND_PERM,1,0,IND_PERM,"IND_PERM","IND_PERM")
{
/* Don't add INDs to granularity cost */
}
-INFO_TABLE(stg_IND_OLDGEN,1,1,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN")
+INFO_TABLE(stg_IND_OLDGEN,1,0,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN")
{
TICK_ENT_STATIC_IND(); /* tick */
R1 = StgInd_indirectee(R1);
jump %GET_ENTRY(R1);
}
-INFO_TABLE(stg_IND_OLDGEN_PERM,1,1,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM")
+INFO_TABLE(stg_IND_OLDGEN_PERM,1,0,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM")
{
/* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky;
this ind is here only to help profiling */
* for the blocking queue in a BQ), which should be big enough for an
* old-generation indirection.
*/
-INFO_TABLE(stg_BLACKHOLE,0,2,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
+INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
{
#if defined(GRAN)
/* Before overwriting TSO_LINK */
jump stg_block_1;
}
-INFO_TABLE(stg_BLACKHOLE_BQ,1,1,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
+INFO_TABLE(stg_BLACKHOLE_BQ,1,0,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
{
#if defined(GRAN)
/* Before overwriting TSO_LINK */
#endif /* defined(PAR) || defined(GRAN) */
/* identical to BLACKHOLEs except for the infotag */
-INFO_TABLE(stg_CAF_BLACKHOLE,0,2,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
+INFO_TABLE(stg_CAF_BLACKHOLE,0,1,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
{
#if defined(GRAN)
/* mainly statistics gathering for GranSim simulation */
}
#ifdef EAGER_BLACKHOLING
-INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
+INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,1,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE");
IF_(stg_SE_BLACKHOLE_entry)
{
STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1);
STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE);
}
-INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
+INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,1,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE");
IF_(stg_SE_CAF_BLACKHOLE_entry)
{
STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1);
and entry code for each type.
------------------------------------------------------------------------- */
-INFO_TABLE(stg_FULL_MVAR,4,0,MVAR,"MVAR","MVAR")
+INFO_TABLE(stg_FULL_MVAR,3,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("FULL_MVAR object entered!"); }
-INFO_TABLE(stg_EMPTY_MVAR,4,0,MVAR,"MVAR","MVAR")
+INFO_TABLE(stg_EMPTY_MVAR,3,0,MVAR,"MVAR","MVAR")
{ foreign "C" barf("EMPTY_MVAR object entered!"); }
/* -----------------------------------------------------------------------------
CLOSURE(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE);
/* ----------------------------------------------------------------------------
- Mutable lists
-
- Mutable lists (used by the garbage collector) consist of a chain of
- StgMutClosures connected through their mut_link fields, ending in
- an END_MUT_LIST closure.
- ------------------------------------------------------------------------- */
-
-INFO_TABLE_CONSTR(stg_END_MUT_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_MUT_LIST","END_MUT_LIST")
-{ foreign "C" barf("END_MUT_LIST object entered!"); }
-
-CLOSURE(stg_END_MUT_LIST_closure,stg_END_MUT_LIST);
-
-INFO_TABLE(stg_MUT_CONS, 1, 1, MUT_CONS, "MUT_CONS", "MUT_CONS")
-{ foreign "C" barf("MUT_CONS object entered!"); }
-
-/* ----------------------------------------------------------------------------
Exception lists
------------------------------------------------------------------------- */
INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN, 0, 0, MUT_ARR_PTRS_FROZEN, "MUT_ARR_PTRS_FROZEN", "MUT_ARR_PTRS_FROZEN")
{ foreign "C" barf("MUT_ARR_PTRS_FROZEN object entered!"); }
+INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN0, 0, 0, MUT_ARR_PTRS_FROZEN0, "MUT_ARR_PTRS_FROZEN0", "MUT_ARR_PTRS_FROZEN0")
+{ foreign "C" barf("MUT_ARR_PTRS_FROZEN0 object entered!"); }
+
/* ----------------------------------------------------------------------------
Mutable Variables
------------------------------------------------------------------------- */
-INFO_TABLE(stg_MUT_VAR, 1, 1, MUT_VAR, "MUT_VAR", "MUT_VAR")
+INFO_TABLE(stg_MUT_VAR, 1, 0, MUT_VAR, "MUT_VAR", "MUT_VAR")
{ foreign "C" barf("MUT_VAR object entered!"); }
/* ----------------------------------------------------------------------------
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
gen = &generations[g];
gen->no = g;
- gen->mut_list = END_MUT_LIST;
- gen->mut_once_list = END_MUT_LIST;
+ gen->mut_list = allocBlock();
gen->collections = 0;
gen->failed_promotions = 0;
gen->max_blocks = 0;
ACQUIRE_SM_LOCK;
((StgIndStatic *)caf)->saved_info = NULL;
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
- oldest_gen->mut_once_list = (StgMutClosure *)caf;
+
+ recordMutableGen(caf, oldest_gen);
RELEASE_SM_LOCK;
/* count the blocks we current have */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- stp = &generations[g].steps[s];
- total_blocks += stp->n_blocks;
- if (RtsFlags.GcFlags.generations == 1) {
- /* two-space collector has a to-space too :-) */
- total_blocks += g0s0->n_to_blocks;
+ for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
+ total_blocks += bd->blocks;
}
- for (bd = stp->large_objects; bd; bd = bd->link) {
- total_blocks += bd->blocks;
- /* hack for megablock groups: they have an extra block or two in
- the second and subsequent megablocks where the block
- descriptors would normally go.
- */
- if (bd->blocks > BLOCKS_PER_MBLOCK) {
- total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
- * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
- }
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ total_blocks += stp->n_blocks;
+ if (RtsFlags.GcFlags.generations == 1) {
+ /* two-space collector has a to-space too :-) */
+ total_blocks += g0s0->n_to_blocks;
+ }
+ for (bd = stp->large_objects; bd; bd = bd->link) {
+ total_blocks += bd->blocks;
+ /* hack for megablock groups: they have an extra block or two in
+ the second and subsequent megablocks where the block
+ descriptors would normally go.
+ */
+ if (bd->blocks > BLOCKS_PER_MBLOCK) {
+ total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
+ * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
+ }
+ }
}
- }
}
/* any blocks held by allocate() */
checkChain(generations[g].steps[s].large_objects);
if (g > 0) {
checkMutableList(generations[g].mut_list, g);
- checkMutOnceList(generations[g].mut_once_list, g);
}
}
}
arr = (StgMutArrPtrs *)allocate(sizeofW(StgMutArrPtrs) + n);
TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0);
SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM);
- arr->mut_link = NULL;
arr->ptrs = n;
n = 0;