From: simonmar Date: Thu, 10 Feb 2005 13:02:40 +0000 (+0000) Subject: [project @ 2005-02-10 13:01:52 by simonmar] X-Git-Tag: Initial_conversion_from_CVS_complete~1088 X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=commitdiff_plain;h=e7c3f957fd36fd9f6369183b7a31e2a4a4c21b43 [project @ 2005-02-10 13:01:52 by simonmar] GC changes: instead of threading old-generation mutable lists through objects in the heap, keep them in a separate flat array. This has some advantages: - the IND_OLDGEN object is now only 2 words, so the minimum size of a THUNK is now 2 words instead of 3. This saves some amount of allocation (about 2% on average according to my measurements), and is more friendly to the cache by squashing objects together more. - keeping the mutable list separate from the IND object will be necessary for our multiprocessor implementation. - removing the mut_link field makes the layout of some objects more uniform, leading to less complexity and special cases. - I also unified the two mutable lists (mut_once_list and mut_list) into a single mutable list, which led to more simplifications in the GC. 
--- diff --git a/ghc/compiler/cmm/CLabel.hs b/ghc/compiler/cmm/CLabel.hs index e732321..feec598 100644 --- a/ghc/compiler/cmm/CLabel.hs +++ b/ghc/compiler/cmm/CLabel.hs @@ -345,7 +345,7 @@ mkUpdInfoLabel = RtsLabel (RtsInfo SLIT("stg_upd_frame")) mkSeqInfoLabel = RtsLabel (RtsInfo SLIT("stg_seq_frame")) mkIndStaticInfoLabel = RtsLabel (RtsInfo SLIT("stg_IND_STATIC")) mkMainCapabilityLabel = RtsLabel (RtsData SLIT("MainCapability")) -mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN")) +mkMAP_FROZEN_infoLabel = RtsLabel (RtsInfo SLIT("stg_MUT_ARR_PTRS_FROZEN0")) mkEMPTY_MVAR_infoLabel = RtsLabel (RtsInfo SLIT("stg_EMPTY_MVAR")) mkTopTickyCtrLabel = RtsLabel (RtsData SLIT("top_ct")) diff --git a/ghc/compiler/cmm/CmmParse.y b/ghc/compiler/cmm/CmmParse.y index 4b25d45..b852eb3 100644 --- a/ghc/compiler/cmm/CmmParse.y +++ b/ghc/compiler/cmm/CmmParse.y @@ -186,7 +186,7 @@ static :: { ExtFCode [CmmStatic] } { do lits <- sequence $4; return $ map CmmStaticLit $ mkStaticClosure (mkRtsInfoLabelFS $3) - dontCareCCS (map getLit lits) [] [] } + dontCareCCS (map getLit lits) [] [] [] } -- arrays of closures required for the CHARLIKE & INTLIKE arrays lits :: { [ExtFCode CmmExpr] } @@ -712,7 +712,7 @@ funInfo name ptrs nptrs cl_type desc_str ty_str fun_type = do staticClosure :: FastString -> FastString -> [CmmLit] -> ExtCode staticClosure cl_label info payload = code $ emitDataLits (mkRtsDataLabelFS cl_label) lits - where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] [] + where lits = mkStaticClosure (mkRtsInfoLabelFS info) dontCareCCS payload [] [] [] foreignCall :: String diff --git a/ghc/compiler/codeGen/CgHeapery.lhs b/ghc/compiler/codeGen/CgHeapery.lhs index 58fbe94..b0bdf46 100644 --- a/ghc/compiler/codeGen/CgHeapery.lhs +++ b/ghc/compiler/codeGen/CgHeapery.lhs @@ -1,7 +1,7 @@ % % (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 % -% $Id: CgHeapery.lhs,v 1.42 2004/11/26 16:20:09 simonmar Exp $ +% $Id: CgHeapery.lhs,v 
1.43 2005/02/10 13:01:53 simonmar Exp $ % \section[CgHeapery]{Heap management functions} @@ -34,9 +34,8 @@ import CgTicky ( staticTickyHdr, tickyDynAlloc, tickyAllocHeap ) import CgParallel ( staticGranHdr, staticParHdr, doGranAllocate ) import CgStackery ( getFinalStackHW, getRealSp ) import CgCallConv ( mkRegLiveness ) -import ClosureInfo ( closureSize, closureUpdReqd, - staticClosureNeedsLink, - mkConInfo, +import ClosureInfo ( closureSize, staticClosureNeedsLink, + mkConInfo, closureNeedsUpdSpace, infoTableLabelFromCI, closureLabelFromCI, nodeMustPointToIt, closureLFInfo, ClosureInfo ) @@ -189,26 +188,37 @@ mkStaticClosureFields -> [CmmLit] -- Payload -> [CmmLit] -- The full closure mkStaticClosureFields cl_info ccs caf_refs payload - = mkStaticClosure info_lbl ccs payload padding_wds static_link_field + = mkStaticClosure info_lbl ccs payload padding_wds + static_link_field saved_info_field where info_lbl = infoTableLabelFromCI cl_info - upd_reqd = closureUpdReqd cl_info + -- CAFs must have consistent layout, regardless of whether they + -- are actually updatable or not. The layout of a CAF is: + -- + -- 3 saved_info + -- 2 static_link + -- 1 indirectee + -- 0 info ptr + -- + -- the static_link and saved_info fields must always be in the same + -- place. So we use closureNeedsUpdSpace rather than + -- closureUpdReqd here: + + is_caf = closureNeedsUpdSpace cl_info - -- for the purposes of laying out the static closure, we consider all - -- thunks to be "updatable", so that the static link field is always - -- in the same place. 
padding_wds - | not upd_reqd = [] - | otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s + | not is_caf = [] + | otherwise = replicate n (mkIntCLit 0) -- a bunch of 0s where n = max 0 (mIN_UPD_SIZE - length payload) - -- We always have a static link field for a thunk, it's used to - -- save the closure's info pointer when we're reverting CAFs - -- (see comment in Storage.c) static_link_field - | upd_reqd || staticClosureNeedsLink cl_info = [static_link_value] - | otherwise = [] + | is_caf || staticClosureNeedsLink cl_info = [static_link_value] + | otherwise = [] + + saved_info_field + | is_caf = [mkIntCLit 0] + | otherwise = [] -- for a static constructor which has NoCafRefs, we set the -- static link field to a non-zero value so the garbage @@ -218,13 +228,14 @@ mkStaticClosureFields cl_info ccs caf_refs payload | otherwise = mkIntCLit 1 mkStaticClosure :: CLabel -> CostCentreStack -> [CmmLit] - -> [CmmLit] -> [CmmLit] -> [CmmLit] -mkStaticClosure info_lbl ccs payload padding_wds static_link_field + -> [CmmLit] -> [CmmLit] -> [CmmLit] -> [CmmLit] +mkStaticClosure info_lbl ccs payload padding_wds static_link_field saved_info_field = [CmmLabel info_lbl] ++ variable_header_words ++ payload ++ padding_wds ++ static_link_field + ++ saved_info_field where variable_header_words = staticGranHdr diff --git a/ghc/compiler/codeGen/CgPrimOp.hs b/ghc/compiler/codeGen/CgPrimOp.hs index 5c01903..52f6551 100644 --- a/ghc/compiler/codeGen/CgPrimOp.hs +++ b/ghc/compiler/codeGen/CgPrimOp.hs @@ -175,7 +175,7 @@ emitPrimOp [res] DataToTagOp [arg] live -- #define unsafeFreezzeArrayzh(r,a) -- { --- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN_info); +-- SET_INFO((StgClosure *)a,&stg_MUT_ARR_PTRS_FROZEN0_info); -- r = a; -- } emitPrimOp [res] UnsafeFreezeArrayOp [arg] live diff --git a/ghc/compiler/codeGen/ClosureInfo.lhs b/ghc/compiler/codeGen/ClosureInfo.lhs index f1b2540..dbd4314 100644 --- a/ghc/compiler/codeGen/ClosureInfo.lhs +++ b/ghc/compiler/codeGen/ClosureInfo.lhs @@ 
-29,7 +29,8 @@ module ClosureInfo ( closureName, infoTableLabelFromCI, closureLabelFromCI, closureSRT, - closureLFInfo, closureSMRep, closureUpdReqd, + closureLFInfo, closureSMRep, closureUpdReqd, + closureNeedsUpdSpace, closureSingleEntry, closureReEntrant, isConstrClosure_maybe, closureFunInfo, isStandardFormThunk, isKnownFun, diff --git a/ghc/includes/Block.h b/ghc/includes/Block.h index d7599c5..37d17a5 100644 --- a/ghc/includes/Block.h +++ b/ghc/includes/Block.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Block.h,v 1.17 2004/08/13 13:09:09 simonmar Exp $ + * $Id: Block.h,v 1.18 2005/02/10 13:02:00 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -142,9 +142,11 @@ INLINE_HEADER bdescr *Bdescr(StgPtr p) (1 + (W_)MBLOCK_ROUND_UP((n-BLOCKS_PER_MBLOCK) * BLOCK_SIZE) / MBLOCK_SIZE) +#ifndef CMINUSMINUS +// to the end... + /* Double-linked block lists: --------------------------------------------- */ -#ifndef CMINUSMINUS INLINE_HEADER void dbl_link_onto(bdescr *bd, bdescr **list) { @@ -155,6 +157,37 @@ dbl_link_onto(bdescr *bd, bdescr **list) } *list = bd; } -#endif +/* Initialisation ---------------------------------------------------------- */ + +extern void initBlockAllocator(void); + +/* Allocation -------------------------------------------------------------- */ + +extern bdescr *allocGroup(nat n); +extern bdescr *allocBlock(void); + +/* De-Allocation ----------------------------------------------------------- */ + +extern void freeGroup(bdescr *p); +extern void freeChain(bdescr *p); + +/* Round a value to megablocks --------------------------------------------- */ + +#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W) + +INLINE_HEADER nat +round_to_mblocks(nat words) +{ + if (words > WORDS_PER_MBLOCK) { + if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) { + words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK; + } else { + words = ((words / WORDS_PER_MBLOCK) + 1) * 
WORDS_PER_MBLOCK; + } + } + return words; +} + +#endif /* !CMINUSMINUS */ #endif /* BLOCK_H */ diff --git a/ghc/includes/ClosureMacros.h b/ghc/includes/ClosureMacros.h index e2519bb..12023a5 100644 --- a/ghc/includes/ClosureMacros.h +++ b/ghc/includes/ClosureMacros.h @@ -174,16 +174,9 @@ /* These macros are optimised versions of the above for certain * closure types. They *must* be equivalent to the generic * STATIC_LINK. - * - * You may be surprised that the STATIC_LINK field for a THUNK_STATIC - * is at offset 2; that's because a THUNK_STATIC always has two words - * of (non-ptr) padding, to make room for the IND_STATIC that is - * going to overwrite it. It doesn't do any harm, because a - * THUNK_STATIC needs this extra word for the IND_STATIC's saved_info - * field anyhow. Hmm, this is all rather delicate. --SDM */ #define FUN_STATIC_LINK(p) ((p)->payload[0]) -#define THUNK_STATIC_LINK(p) ((p)->payload[2]) +#define THUNK_STATIC_LINK(p) ((p)->payload[1]) #define IND_STATIC_LINK(p) ((p)->payload[1]) #define STATIC_LINK2(info,p) \ diff --git a/ghc/includes/ClosureTypes.h b/ghc/includes/ClosureTypes.h index f727fc7..3e2b7cf 100644 --- a/ghc/includes/ClosureTypes.h +++ b/ghc/includes/ClosureTypes.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- - * $Id: ClosureTypes.h,v 1.19 2004/11/18 09:56:17 tharris Exp $ + * $Id: ClosureTypes.h,v 1.20 2005/02/10 13:02:02 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -66,9 +66,9 @@ #define MVAR 50 #define ARR_WORDS 51 #define MUT_ARR_PTRS 52 -#define MUT_ARR_PTRS_FROZEN 53 -#define MUT_VAR 54 -#define MUT_CONS 55 +#define MUT_ARR_PTRS_FROZEN0 53 +#define MUT_ARR_PTRS_FROZEN 54 +#define MUT_VAR 55 #define WEAK 56 #define FOREIGN 57 #define STABLE_NAME 58 @@ -87,5 +87,5 @@ #define CATCH_RETRY_FRAME 71 #define CATCH_STM_FRAME 72 #define N_CLOSURE_TYPES 73 - + #endif /* CLOSURETYPES_H */ diff --git a/ghc/includes/Closures.h b/ghc/includes/Closures.h index d160ac5..7cb4a52 100644 
--- a/ghc/includes/Closures.h +++ b/ghc/includes/Closures.h @@ -66,19 +66,6 @@ struct StgClosure_ { struct StgClosure_ *payload[FLEXIBLE_ARRAY]; }; -/* What a stroke of luck - all our mutable closures follow the same - * basic layout, with the mutable link field as the second field after - * the header. This means the following structure is the supertype of - * mutable closures. - */ - -typedef struct StgMutClosure_ { - StgHeader header; - StgWord padding; - struct StgMutClosure_ *mut_link; - struct StgClosure_ *payload[FLEXIBLE_ARRAY]; -} StgMutClosure; - typedef struct { StgHeader header; StgClosure *selectee; @@ -108,12 +95,6 @@ typedef struct { } StgInd; typedef struct { - StgHeader header; - StgClosure *indirectee; - StgMutClosure *mut_link; -} StgIndOldGen; - -typedef struct { StgHeader header; StgClosure *indirectee; StgClosure *static_link; @@ -129,14 +110,12 @@ typedef struct { typedef struct { StgHeader header; StgWord ptrs; - StgMutClosure *mut_link; /* mutable list */ StgClosure *payload[FLEXIBLE_ARRAY]; } StgMutArrPtrs; typedef struct { StgHeader header; StgClosure *var; - StgMutClosure *mut_link; } StgMutVar; typedef struct _StgUpdateFrame { @@ -303,7 +282,6 @@ typedef struct { typedef struct { StgHeader header; struct StgTSO_ *head; - StgMutClosure *mut_link; struct StgTSO_ *tail; StgClosure* value; } StgMVar; @@ -329,7 +307,6 @@ typedef struct { typedef struct StgTVarWaitQueue_ { StgHeader header; struct StgTSO_ *waiting_tso; - StgMutClosure *mut_link; struct StgTVarWaitQueue_ *next_queue_entry; struct StgTVarWaitQueue_ *prev_queue_entry; } StgTVarWaitQueue; @@ -337,7 +314,6 @@ typedef struct StgTVarWaitQueue_ { typedef struct { StgHeader header; StgClosure *current_value; - StgMutClosure *mut_link; StgTVarWaitQueue *first_wait_queue_entry; } StgTVar; @@ -354,7 +330,6 @@ typedef struct { typedef struct StgTRecChunk_ { StgHeader header; struct StgTRecChunk_ *prev_chunk; - StgMutClosure *mut_link; StgWord next_entry_idx; TRecEntry 
entries[TREC_CHUNK_NUM_ENTRIES]; } StgTRecChunk; @@ -371,7 +346,6 @@ typedef enum { typedef struct StgTRecHeader_ { StgHeader header; TRecState state; - StgMutClosure *mut_link; struct StgTRecHeader_ *enclosing_trec; StgTRecChunk *current_chunk; } StgTRecHeader; @@ -401,8 +375,7 @@ typedef struct { of closures that can be found on a blocking queue: StgTSO, StgRBHSave, StgBlockedFetch. (StgRBHSave can only appear at the end of a blocking queue). Logically, this is a union type, but defining another struct - with a common layout is easier to handle in the code (same as for - StgMutClosures). + with a common layout is easier to handle in the code. Note that in the standard setup only StgTSOs can be on a blocking queue. This is one of the main reasons for slightly different code in files such as Schedule.c. @@ -410,7 +383,6 @@ typedef struct { typedef struct StgBlockingQueueElement_ { StgHeader header; struct StgBlockingQueueElement_ *link; /* next elem in BQ */ - StgMutClosure *mut_link; /* next elem in mutable list */ struct StgClosure_ *payload[FLEXIBLE_ARRAY];/* contents of the closure */ } StgBlockingQueueElement; @@ -418,7 +390,6 @@ typedef struct StgBlockingQueueElement_ { typedef struct StgBlockingQueue_ { StgHeader header; struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */ - StgMutClosure *mut_link; /* next elem in mutable list */ } StgBlockingQueue; /* this closure is hanging at the end of a blocking queue in (see RBH.c) */ @@ -430,7 +401,6 @@ typedef struct StgRBHSave_ { typedef struct StgRBH_ { StgHeader header; struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */ - StgMutClosure *mut_link; /* next elem in mutable list */ } StgRBH; #else @@ -438,7 +408,6 @@ typedef struct StgRBH_ { typedef struct StgBlockingQueue_ { StgHeader header; struct StgTSO_ *blocking_queue; - StgMutClosure *mut_link; } StgBlockingQueue; #endif @@ -448,14 +417,12 @@ typedef struct StgBlockingQueue_ { typedef struct StgFetchMe_ { StgHeader header; 
globalAddr *ga; /* ptr to unique id for a closure */ - StgMutClosure *mut_link; /* next elem in mutable list */ } StgFetchMe; /* same contents as an ordinary StgBlockingQueue */ typedef struct StgFetchMeBlockingQueue_ { StgHeader header; struct StgBlockingQueueElement_ *blocking_queue; /* start of the BQ */ - StgMutClosure *mut_link; /* next elem in mutable list */ } StgFetchMeBlockingQueue; /* This is an entry in a blocking queue. It indicates a fetch request from a @@ -467,7 +434,6 @@ typedef struct StgFetchMeBlockingQueue_ { typedef struct StgBlockedFetch_ { StgHeader header; struct StgBlockingQueueElement_ *link; /* next elem in the BQ */ - StgMutClosure *mut_link; /* next elem in mutable list */ StgClosure *node; /* node to fetch */ globalAddr ga; /* where to send the result to */ } StgBlockedFetch; /* NB: not just a ptr to a GA */ diff --git a/ghc/includes/Constants.h b/ghc/includes/Constants.h index 579705e..b4d66cb 100644 --- a/ghc/includes/Constants.h +++ b/ghc/includes/Constants.h @@ -1,5 +1,5 @@ /* ---------------------------------------------------------------------------- - * $Id: Constants.h,v 1.27 2004/11/18 09:56:19 tharris Exp $ + * $Id: Constants.h,v 1.28 2005/02/10 13:02:03 simonmar Exp $ * * (c) The GHC Team, 1998-2002 * @@ -21,12 +21,12 @@ /* ----------------------------------------------------------------------------- Minimum closure sizes - Here we define the minimum size for updatable closures. This must be at - least 2, to allow for cons cells and linked indirections. All updates + Here we define the minimum size for updatable closures. All updates will be performed on closures of this size. For non-updatable closures the minimum size is 1 to allow for a forwarding pointer. - Linked indirections are UPD_OLDGEN things: see Closures.h + When we used to keep the mutable list threaded through closures on + the heap, MIN_UPD_SIZE used to be 2. Now it's 1. 
o MIN_UPD_SIZE doesn't apply to stack closures, static closures or non-updateable objects like PAPs or CONSTRs @@ -42,7 +42,7 @@ o EVACUATED -------------------------------------------------------------------------- */ -#define MIN_UPD_SIZE 2 +#define MIN_UPD_SIZE 1 #define MIN_NONUPD_SIZE 1 /* ----------------------------------------------------------------------------- diff --git a/ghc/includes/StgMiscClosures.h b/ghc/includes/StgMiscClosures.h index 45ae06b..b0c15d5 100644 --- a/ghc/includes/StgMiscClosures.h +++ b/ghc/includes/StgMiscClosures.h @@ -122,10 +122,10 @@ RTS_INFO(stg_ARR_WORDS_info); RTS_INFO(stg_MUT_ARR_WORDS_info); RTS_INFO(stg_MUT_ARR_PTRS_info); RTS_INFO(stg_MUT_ARR_PTRS_FROZEN_info); +RTS_INFO(stg_MUT_ARR_PTRS_FROZEN0_info); RTS_INFO(stg_MUT_VAR_info); RTS_INFO(stg_END_TSO_QUEUE_info); RTS_INFO(stg_MUT_CONS_info); -RTS_INFO(stg_END_MUT_LIST_info); RTS_INFO(stg_catch_info); RTS_INFO(stg_PAP_info); RTS_INFO(stg_AP_info); @@ -185,7 +185,6 @@ RTS_ENTRY(stg_MUT_ARR_PTRS_FROZEN_entry); RTS_ENTRY(stg_MUT_VAR_entry); RTS_ENTRY(stg_END_TSO_QUEUE_entry); RTS_ENTRY(stg_MUT_CONS_entry); -RTS_ENTRY(stg_END_MUT_LIST_entry); RTS_ENTRY(stg_catch_entry); RTS_ENTRY(stg_PAP_entry); RTS_ENTRY(stg_AP_entry); @@ -214,7 +213,6 @@ RTS_ENTRY(stg_raise_entry); /* closures */ RTS_CLOSURE(stg_END_TSO_QUEUE_closure); -RTS_CLOSURE(stg_END_MUT_LIST_closure); RTS_CLOSURE(stg_NO_FINALIZER_closure); RTS_CLOSURE(stg_dummy_ret_closure); RTS_CLOSURE(stg_forceIO_closure); diff --git a/ghc/includes/Storage.h b/ghc/includes/Storage.h index 5c76094..7d6fa00 100644 --- a/ghc/includes/Storage.h +++ b/ghc/includes/Storage.h @@ -80,11 +80,10 @@ typedef struct _generation { step * steps; /* steps */ unsigned int n_steps; /* number of steps */ unsigned int max_blocks; /* max blocks in step 0 */ - StgMutClosure *mut_list; /* mut objects in this gen (not G0)*/ - StgMutClosure *mut_once_list; /* objects that point to younger gens */ + bdescr *mut_list; /* mut objects in this gen (not G0)*/ /* 
temporary use during GC: */ - StgMutClosure * saved_mut_list; + bdescr *saved_mut_list; /* stats information */ unsigned int collections; @@ -200,37 +199,33 @@ extern Mutex sm_mutex; #define RELEASE_SM_LOCK #endif -/* ToDo: shouldn't recordMutable and recordOldToNewPtrs acquire some - * kind of lock in the SMP case? +/* ToDo: shouldn't recordMutable acquire some + * kind of lock in the SMP case? Or do we need per-processor + * mutable lists? */ INLINE_HEADER void -recordMutable(StgMutClosure *p) +recordMutableGen(StgClosure *p, generation *gen) { - bdescr *bd; - -#ifdef SMP - ASSERT(p->header.info == &stg_WHITEHOLE_info || closure_MUTABLE(p)); -#else - ASSERT(closure_MUTABLE(p)); -#endif - - bd = Bdescr((P_)p); - if (bd->gen_no > 0) { - p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_list; - RTS_DEREF(generations)[bd->gen_no].mut_list = p; - } + bdescr *bd; + + bd = gen->mut_list; + if (bd->free >= bd->start + BLOCK_SIZE_W) { + bdescr *new_bd; + new_bd = allocBlock(); + new_bd->link = bd; + bd = new_bd; + gen->mut_list = bd; + } + *bd->free++ = (StgWord)p; } INLINE_HEADER void -recordOldToNewPtrs(StgMutClosure *p) +recordMutable(StgClosure *p) { - bdescr *bd; - - bd = Bdescr((P_)p); - if (bd->gen_no > 0) { - p->mut_link = RTS_DEREF(generations)[bd->gen_no].mut_once_list; - RTS_DEREF(generations)[bd->gen_no].mut_once_list = p; - } + bdescr *bd; + ASSERT(closure_MUTABLE(p)); + bd = Bdescr((P_)p); + if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]); } /* ----------------------------------------------------------------------------- @@ -277,10 +272,10 @@ INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np ) { return sizeofW(StgHeader) + p + np; } INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void ) -{ return sizeofW(StgHeader) + MIN_UPD_SIZE; } +{ return stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgSelector)); } INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void ) -{ return sizeofW(StgHeader) + MIN_UPD_SIZE; } +{ return 
stg_max(sizeofW(StgHeader)+MIN_UPD_SIZE, sizeofW(StgBlockingQueue)); } /* -------------------------------------------------------------------------- Sizes of closures @@ -354,14 +349,6 @@ extern void resizeNursery ( nat blocks ); extern void tidyAllocateLists ( void ); /* ----------------------------------------------------------------------------- - MUTABLE LISTS - A mutable list is ended with END_MUT_LIST, so that we can use NULL - as an indication that an object is not on a mutable list. - ------------------------------------------------------------------------- */ - -#define END_MUT_LIST ((StgMutClosure *)(void *)&stg_END_MUT_LIST_closure) - -/* ----------------------------------------------------------------------------- Functions from GC.c -------------------------------------------------------------------------- */ diff --git a/ghc/includes/TSO.h b/ghc/includes/TSO.h index a0446b0..2eca88a 100644 --- a/ghc/includes/TSO.h +++ b/ghc/includes/TSO.h @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: TSO.h,v 1.40 2005/01/28 12:55:53 simonmar Exp $ + * $Id: TSO.h,v 1.41 2005/02/10 13:02:05 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -126,7 +126,6 @@ typedef struct StgTSO_ { StgHeader header; struct StgTSO_* link; // Links threads onto blocking queues */ - StgMutClosure * mut_link; // TSO's are mutable of course! 
*/ struct StgTSO_* global_link; // Links all threads together */ StgWord16 what_next; // Values defined in Constants.h diff --git a/ghc/includes/Updates.h b/ghc/includes/Updates.h index 37b8ecc..0845e20 100644 --- a/ghc/includes/Updates.h +++ b/ghc/includes/Updates.h @@ -294,11 +294,8 @@ DEBUG_FILL_SLOP(StgClosure *p) } else { \ if (info != stg_BLACKHOLE_BQ_info) { \ DEBUG_FILL_SLOP(p1); \ - W_ __mut_once_list; \ - __mut_once_list = generation(TO_W_(bdescr_gen_no(bd))) + \ - OFFSET_generation_mut_once_list; \ - StgMutClosure_mut_link(p1) = W_[__mut_once_list]; \ - W_[__mut_once_list] = p1; \ + foreign "C" recordMutableGen(p1 "ptr", \ + generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \ } \ StgInd_indirectee(p1) = p2; \ SET_INFO(p1, stg_IND_OLDGEN_info); \ @@ -323,10 +320,9 @@ DEBUG_FILL_SLOP(StgClosure *p) } else { \ if (_info != &stg_BLACKHOLE_BQ_info) { \ DEBUG_FILL_SLOP(p1); \ - ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \ - generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \ + recordMutableGen(p1, &generations[bd->gen_no]); \ } \ - ((StgIndOldGen *)p1)->indirectee = p2; \ + ((StgInd *)p1)->indirectee = p2; \ SET_INFO(p1, &stg_IND_OLDGEN_info); \ TICK_UPD_OLD_IND(); \ and_then; \ @@ -362,10 +358,9 @@ updateWithPermIndirection(const StgInfoTable *info, TICK_UPD_NEW_PERM_IND(p1); } else { if (info != &stg_BLACKHOLE_BQ_info) { - ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; - generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; + recordMutableGen(p1, &generations[bd->gen_no]); } - ((StgIndOldGen *)p1)->indirectee = p2; + ((StgInd *)p1)->indirectee = p2; SET_INFO(p1, &stg_IND_OLDGEN_PERM_info); // @LDV profiling // We have just created a new closure. 
diff --git a/ghc/includes/mkDerivedConstants.c b/ghc/includes/mkDerivedConstants.c index fbf236b..77a35bc 100644 --- a/ghc/includes/mkDerivedConstants.c +++ b/ghc/includes/mkDerivedConstants.c @@ -213,7 +213,7 @@ main(int argc, char *argv[]) struct_field(bdescr, link); struct_size(generation); - struct_field(generation, mut_once_list); + struct_field(generation, mut_list); struct_field(CostCentreStack, ccsID); struct_field(CostCentreStack, mem_alloc); @@ -245,7 +245,6 @@ main(int argc, char *argv[]) closure_payload(StgArrWords, payload); closure_field(StgTSO, link); - closure_field(StgTSO, mut_link); closure_field(StgTSO, global_link); closure_field(StgTSO, what_next); closure_field(StgTSO, why_blocked); @@ -294,7 +293,6 @@ main(int argc, char *argv[]) closure_payload(StgAP_STACK, payload); closure_field(StgInd, indirectee); - closure_field(StgMutClosure, mut_link); closure_size(StgMutVar); closure_field(StgMutVar, var); diff --git a/ghc/rts/BlockAlloc.c b/ghc/rts/BlockAlloc.c index ae87fcc..baa096a 100644 --- a/ghc/rts/BlockAlloc.c +++ b/ghc/rts/BlockAlloc.c @@ -51,7 +51,8 @@ initGroup(nat n, bdescr *head) if (n != 0) { head->blocks = n; - head->free = head->start; + head->free = head->start; + head->link = NULL; for (i=1, bd = head+1; i < n; i++, bd++) { bd->free = 0; bd->blocks = 0; @@ -78,9 +79,8 @@ allocGroup(nat n) *last = bd->link; /* no initialisation necessary - this is already a * self-contained block group. */ -#ifdef DEBUG bd->free = bd->start; /* block isn't free now */ -#endif + bd->link = NULL; return bd; } if (bd->blocks > n) { /* block too big... 
*/ @@ -226,13 +226,12 @@ freeGroup(bdescr *p) return; } -#ifdef DEBUG + p->free = (void *)-1; /* indicates that this block is free */ p->step = NULL; p->gen_no = 0; /* fill the block group with garbage if sanity checking is on */ IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE)); -#endif /* find correct place in free list to place new group */ last = NULL; diff --git a/ghc/rts/BlockAlloc.h b/ghc/rts/BlockAlloc.h index 8d52e32..1472ac6 100644 --- a/ghc/rts/BlockAlloc.h +++ b/ghc/rts/BlockAlloc.h @@ -9,37 +9,6 @@ #ifndef BLOCK_ALLOC_H #define BLOCK_ALLOC_H -/* Initialisation ---------------------------------------------------------- */ - -extern void initBlockAllocator(void); - -/* Allocation -------------------------------------------------------------- */ - -extern bdescr *allocGroup(nat n); -extern bdescr *allocBlock(void); - -/* De-Allocation ----------------------------------------------------------- */ - -extern void freeGroup(bdescr *p); -extern void freeChain(bdescr *p); - -/* Round a value to megablocks --------------------------------------------- */ - -#define WORDS_PER_MBLOCK (BLOCKS_PER_MBLOCK * BLOCK_SIZE_W) - -INLINE_HEADER nat -round_to_mblocks(nat words) -{ - if (words > WORDS_PER_MBLOCK) { - if ((words % WORDS_PER_MBLOCK) < (WORDS_PER_MBLOCK / 2)) { - words = (words / WORDS_PER_MBLOCK) * WORDS_PER_MBLOCK; - } else { - words = ((words / WORDS_PER_MBLOCK) + 1) * WORDS_PER_MBLOCK; - } - } - return words; -} - /* Debugging -------------------------------------------------------------- */ #ifdef DEBUG diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c index 06f46f7..a57fa2c 100644 --- a/ghc/rts/GC.c +++ b/ghc/rts/GC.c @@ -148,7 +148,6 @@ static void mark_root ( StgClosure **root ); REGPARM1 static StgClosure * evacuate (StgClosure *q); static void zero_static_object_list ( StgClosure* first_static ); -static void zero_mutable_list ( StgMutClosure *first ); static rtsBool traverse_weak_ptr_list ( void ); static void mark_weak_ptr_list ( StgWeak **list 
); @@ -163,7 +162,6 @@ static rtsBool scavenge_one ( StgPtr p ); static void scavenge_large ( step * ); static void scavenge_static ( void ); static void scavenge_mutable_list ( generation *g ); -static void scavenge_mut_once_list ( generation *g ); static void scavenge_large_bitmap ( StgPtr p, StgLargeBitmap *large_bitmap, @@ -265,7 +263,7 @@ gc_alloc_block(step *stp) (and all younger generations): - follow all pointers in the root set. the root set includes all - mutable objects in all generations (mutable_list and mut_once_list). + mutable objects in all generations (mutable_list). - for each pointer, evacuate the object it points to into either @@ -277,7 +275,7 @@ gc_alloc_block(step *stp) When we evacuate an object we attempt to evacuate everything it points to into the same generation - this is achieved by setting evac_gen to the desired generation. If - we can't do this, then an entry in the mut_once list has to + we can't do this, then an entry in the mut list has to be made for the cross-generation pointer. + if the object is already in a generation > N, then leave @@ -369,13 +367,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) static_objects = END_OF_STATIC_LIST; scavenged_static_objects = END_OF_STATIC_LIST; - /* zero the mutable list for the oldest generation (see comment by - * zero_mutable_list below). - */ - if (major_gc) { - zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list); - } - /* Save the old to-space if we're doing a two-space collection */ if (RtsFlags.GcFlags.generations == 1) { @@ -393,8 +384,14 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) // collecting. // for (g = 0; g <= N; g++) { - generations[g].mut_once_list = END_MUT_LIST; - generations[g].mut_list = END_MUT_LIST; + + // throw away the mutable list. Invariant: the mutable list + // always has at least one block; this means we can avoid a check for + // NULL in recordMutable(). 
+ if (g != 0) { + freeChain(generations[g].mut_list); + generations[g].mut_list = allocBlock(); + } for (s = 0; s < generations[g].n_steps; s++) { @@ -517,23 +514,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) int st; for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { generations[g].saved_mut_list = generations[g].mut_list; - generations[g].mut_list = END_MUT_LIST; + generations[g].mut_list = allocBlock(); + // mut_list always has at least one block. } - // Do the mut-once lists first for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { - IF_PAR_DEBUG(verbose, - printMutOnceList(&generations[g])); - scavenge_mut_once_list(&generations[g]); - evac_gen = g; - for (st = generations[g].n_steps-1; st >= 0; st--) { - scavenge(&generations[g].steps[st]); - } - } - - for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { - IF_PAR_DEBUG(verbose, - printMutableList(&generations[g])); + IF_PAR_DEBUG(verbose, printMutableList(&generations[g])); scavenge_mutable_list(&generations[g]); evac_gen = g; for (st = generations[g].n_steps-1; st >= 0; st--) { @@ -719,6 +705,14 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc ) generations[g].collections++; // for stats } + // Count the mutable list as bytes "copied" for the purposes of + // stats. Every mutable list is copied during every GC. + if (g > 0) { + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + copied += (bd->free - bd->start) * sizeof(StgWord); + } + } + for (s = 0; s < generations[g].n_steps; s++) { bdescr *next; stp = &generations[g].steps[s]; @@ -1588,39 +1582,6 @@ evacuate_large(StgPtr p) } /* ----------------------------------------------------------------------------- - Adding a MUT_CONS to an older generation. - - This is necessary from time to time when we end up with an - old-to-new generation pointer in a non-mutable object. We defer - the promotion until the next GC. 
- -------------------------------------------------------------------------- */ - -static StgClosure * -mkMutCons(StgClosure *ptr, generation *gen) -{ - StgMutVar *q; - step *stp; - - stp = &gen->steps[0]; - - /* chain a new block onto the to-space for the destination step if - * necessary. - */ - if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) { - gc_alloc_block(stp); - } - - q = (StgMutVar *)stp->hp; - stp->hp += sizeofW(StgMutVar); - - SET_HDR(q,&stg_MUT_CONS_info,CCS_GC); - q->var = ptr; - recordOldToNewPtrs((StgMutClosure *)q); - - return (StgClosure *)q; -} - -/* ----------------------------------------------------------------------------- Evacuate This is called (eventually) for every live object in the system. @@ -1750,10 +1711,10 @@ loop: case FUN_1_0: case FUN_0_1: case CONSTR_1_0: + case THUNK_1_0: + case THUNK_0_1: return copy(q,sizeofW(StgHeader)+1,stp); - case THUNK_1_0: // here because of MIN_UPD_SIZE - case THUNK_0_1: case THUNK_1_1: case THUNK_0_2: case THUNK_2_0: @@ -1921,6 +1882,7 @@ loop: case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: // just copy the block return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp); @@ -2505,9 +2467,6 @@ scavenge(step *stp) switch (info->type) { case MVAR: - /* treat MVars specially, because we don't want to evacuate the - * mut_link field in the middle of the closure. - */ { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; @@ -2515,8 +2474,7 @@ scavenge(step *stp) mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)mvar); - failed_to_evac = rtsFalse; // mutable. + failed_to_evac = rtsTrue; // mutable. 
p += sizeofW(StgMVar); break; } @@ -2539,7 +2497,7 @@ scavenge(step *stp) case THUNK_1_0: scavenge_thunk_srt(info); ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); - p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + p += sizeofW(StgHeader) + 1; break; case FUN_1_0: @@ -2551,7 +2509,7 @@ scavenge(step *stp) case THUNK_0_1: scavenge_thunk_srt(info); - p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE + p += sizeofW(StgHeader) + 1; break; case FUN_0_1: @@ -2636,27 +2594,15 @@ scavenge(step *stp) } // fall through case IND_OLDGEN_PERM: - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordOldToNewPtrs((StgMutClosure *)p); - } - p += sizeofW(StgIndOldGen); + ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee); + p += sizeofW(StgInd); break; case MUT_VAR: evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)p); - failed_to_evac = rtsFalse; // mutable anyhow - p += sizeofW(StgMutVar); - break; - - case MUT_CONS: - // ignore these - failed_to_evac = rtsFalse; // mutable anyhow + failed_to_evac = rtsTrue; // mutable anyhow p += sizeofW(StgMutVar); break; @@ -2672,8 +2618,7 @@ scavenge(step *stp) StgBlockingQueue *bh = (StgBlockingQueue *)p; bh->blocking_queue = (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); - recordMutable((StgMutClosure *)bh); - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; p += BLACKHOLE_sizeW(); break; } @@ -2718,21 +2663,16 @@ scavenge(step *stp) *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)q); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. 
break; } case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: // follow everything { StgPtr next; - // Set the mut_link field to NULL, so that we will put this - // array back on the mutable list if it is subsequently thawed - // by unsafeThaw#. - ((StgMutArrPtrs*)p)->mut_link = NULL; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); @@ -2750,8 +2690,7 @@ scavenge(step *stp) evac_gen = 0; scavengeTSO(tso); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tso); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. p += tso_sizeW(tso); break; } @@ -2767,8 +2706,7 @@ scavenge(step *stp) StgRBH *rbh = (StgRBH *)p; (StgClosure *)rbh->blocking_queue = evacuate((StgClosure *)rbh->blocking_queue); - recordMutable((StgMutClosure *)to); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. IF_DEBUG(gc, debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); @@ -2786,10 +2724,6 @@ scavenge(step *stp) // follow the link to the rest of the blocking queue (StgClosure *)bf->link = evacuate((StgClosure *)bf->link); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); - } IF_DEBUG(gc, debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", bf, info_type((StgClosure *)bf), @@ -2810,10 +2744,6 @@ scavenge(step *stp) StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; (StgClosure *)fmbq->blocking_queue = evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); - } IF_DEBUG(gc, debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); @@ -2830,8 +2760,7 @@ scavenge(step *stp) wq->next_queue_entry = (StgTVarWaitQueue 
*)evacuate((StgClosure*)wq->next_queue_entry); wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)wq); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTVarWaitQueue); break; } @@ -2843,8 +2772,7 @@ scavenge(step *stp) tvar->current_value = evacuate((StgClosure*)tvar->current_value); tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tvar); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTVar); break; } @@ -2856,8 +2784,7 @@ scavenge(step *stp) trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)trec); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTRecHeader); break; } @@ -2875,8 +2802,7 @@ scavenge(step *stp) e->new_value = evacuate((StgClosure*)e->new_value); } evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tc); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTRecChunk); break; } @@ -2886,13 +2812,16 @@ scavenge(step *stp) info->type, p); } - /* If we didn't manage to promote all the objects pointed to by - * the current object, then we have to designate this object as - * mutable (because it contains old-to-new generation pointers). + /* + * We need to record the current object on the mutable list if + * (a) It is actually mutable, or + * (b) It contains pointers to a younger generation. + * Case (b) arises if we didn't manage to promote everything that + * the current object points to into the current generation. 
*/ if (failed_to_evac) { failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)q, &generations[evac_gen]); + recordMutableGen((StgClosure *)q, stp->gen); } } @@ -2929,9 +2858,6 @@ linear_scan: switch (info->type) { case MVAR: - /* treat MVars specially, because we don't want to evacuate the - * mut_link field in the middle of the closure. - */ { StgMVar *mvar = ((StgMVar *)p); evac_gen = 0; @@ -2939,7 +2865,7 @@ linear_scan: mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); mvar->value = evacuate((StgClosure *)mvar->value); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; // mutable. + failed_to_evac = rtsTrue; // mutable. break; } @@ -3024,24 +2950,15 @@ linear_scan: case IND_OLDGEN: case IND_OLDGEN_PERM: - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - if (failed_to_evac) { - recordOldToNewPtrs((StgMutClosure *)p); - } - failed_to_evac = rtsFalse; + ((StgInd *)p)->indirectee = + evacuate(((StgInd *)p)->indirectee); break; case MUT_VAR: evac_gen = 0; ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; - break; - - case MUT_CONS: - // ignore these - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; case CAF_BLACKHOLE: @@ -3056,7 +2973,7 @@ linear_scan: StgBlockingQueue *bh = (StgBlockingQueue *)p; bh->blocking_queue = (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; } @@ -3093,20 +3010,16 @@ linear_scan: *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. break; } case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: // follow everything { StgPtr next; - // Set the mut_link field to NULL, so that we will put this - // array on the mutable list if it is subsequently thawed - // by unsafeThaw#. 
- ((StgMutArrPtrs*)p)->mut_link = NULL; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); @@ -3120,7 +3033,7 @@ linear_scan: evac_gen = 0; scavengeTSO(tso); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; } @@ -3135,8 +3048,7 @@ linear_scan: StgRBH *rbh = (StgRBH *)p; bh->blocking_queue = (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); - recordMutable((StgMutClosure *)rbh); - failed_to_evac = rtsFalse; // mutable anyhow. + failed_to_evac = rtsTrue; // mutable anyhow. IF_DEBUG(gc, debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", p, info_type(p), (StgClosure *)rbh->blocking_queue)); @@ -3152,10 +3064,6 @@ linear_scan: // follow the link to the rest of the blocking queue (StgClosure *)bf->link = evacuate((StgClosure *)bf->link); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); - } IF_DEBUG(gc, debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", bf, info_type((StgClosure *)bf), @@ -3174,10 +3082,6 @@ linear_scan: StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; (StgClosure *)fmbq->blocking_queue = evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); - } IF_DEBUG(gc, debugBelch("@@ scavenge: %p (%s) exciting, isn't it", p, info_type((StgClosure *)p))); @@ -3193,8 +3097,7 @@ linear_scan: wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)wq); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable break; } @@ -3205,8 +3108,7 @@ linear_scan: tvar->current_value = evacuate((StgClosure*)tvar->current_value); 
tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tvar); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable break; } @@ -3223,8 +3125,7 @@ linear_scan: e->new_value = evacuate((StgClosure*)e->new_value); } evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)tc); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable break; } @@ -3235,8 +3136,7 @@ linear_scan: trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); evac_gen = saved_evac_gen; - recordMutable((StgMutClosure *)trec); - failed_to_evac = rtsFalse; // mutable + failed_to_evac = rtsTrue; // mutable break; } @@ -3247,7 +3147,7 @@ linear_scan: if (failed_to_evac) { failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)q, &generations[evac_gen]); + recordMutableGen((StgClosure *)q, &generations[evac_gen]); } // mark the next bit to indicate "scavenged" @@ -3314,6 +3214,18 @@ scavenge_one(StgPtr p) switch (info->type) { + case MVAR: + { + StgMVar *mvar = ((StgMVar *)p); + evac_gen = 0; + mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); + mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); + mvar->value = evacuate((StgClosure *)mvar->value); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable. 
+ break; + } + case FUN: case FUN_1_0: // hardly worth specialising these guys case FUN_0_1: @@ -3335,7 +3247,6 @@ scavenge_one(StgPtr p) case WEAK: case FOREIGN: case IND_PERM: - case IND_OLDGEN_PERM: { StgPtr q, end; @@ -3346,12 +3257,29 @@ scavenge_one(StgPtr p) break; } + case MUT_VAR: + evac_gen = 0; + ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable anyhow + break; + case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: case SE_BLACKHOLE: case BLACKHOLE: break; + case BLACKHOLE_BQ: + { + StgBlockingQueue *bh = (StgBlockingQueue *)p; + evac_gen = 0; // repeatedly mutable + bh->blocking_queue = + (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); + failed_to_evac = rtsTrue; + break; + } + case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; @@ -3359,6 +3287,21 @@ scavenge_one(StgPtr p) break; } + case AP_STACK: + { + StgAP_STACK *ap = (StgAP_STACK *)p; + + ap->fun = evacuate(ap->fun); + scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); + p = (StgPtr)ap->payload + ap->size; + break; + } + + case PAP: + case AP: + p = scavenge_PAP((StgPAP *)p); + break; + case ARR_WORDS: // nothing to follow break; @@ -3369,26 +3312,21 @@ scavenge_one(StgPtr p) StgPtr next; evac_gen = 0; // repeatedly mutable - recordMutable((StgMutClosure *)p); next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); } evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; } case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: { // follow everything StgPtr next; - // Set the mut_link field to NULL, so that we will put this - // array on the mutable list if it is subsequently thawed - // by unsafeThaw#. 
- ((StgMutArrPtrs*)p)->mut_link = NULL; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); @@ -3402,82 +3340,122 @@ scavenge_one(StgPtr p) evac_gen = 0; // repeatedly mutable scavengeTSO(tso); - recordMutable((StgMutClosure *)tso); evac_gen = saved_evac_gen; - failed_to_evac = rtsFalse; + failed_to_evac = rtsTrue; break; } - case AP_STACK: - { - StgAP_STACK *ap = (StgAP_STACK *)p; - - ap->fun = evacuate(ap->fun); - scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); - p = (StgPtr)ap->payload + ap->size; +#if defined(PAR) + case RBH: // cf. BLACKHOLE_BQ + { +#if 0 + nat size, ptrs, nonptrs, vhs; + char str[80]; + StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); +#endif + StgRBH *rbh = (StgRBH *)p; + (StgClosure *)rbh->blocking_queue = + evacuate((StgClosure *)rbh->blocking_queue); + failed_to_evac = rtsTrue; // mutable anyhow. + IF_DEBUG(gc, + debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)", + p, info_type(p), (StgClosure *)rbh->blocking_queue)); + // ToDo: use size of reverted closure here! break; } - case PAP: - case AP: - p = scavenge_PAP((StgPAP *)p); - break; - - case IND_OLDGEN: - // This might happen if for instance a MUT_CONS was pointing to a - // THUNK which has since been updated. The IND_OLDGEN will - // be on the mutable list anyway, so we don't need to do anything - // here. 
+ case BLOCKED_FETCH: + { + StgBlockedFetch *bf = (StgBlockedFetch *)p; + // follow the pointer to the node which is being demanded + (StgClosure *)bf->node = + evacuate((StgClosure *)bf->node); + // follow the link to the rest of the blocking queue + (StgClosure *)bf->link = + evacuate((StgClosure *)bf->link); + IF_DEBUG(gc, + debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it", + bf, info_type((StgClosure *)bf), + bf->node, info_type(bf->node))); break; + } - default: - barf("scavenge_one: strange object %d", (int)(info->type)); - } - - no_luck = failed_to_evac; - failed_to_evac = rtsFalse; - return (no_luck); -} - -/* ----------------------------------------------------------------------------- - Scavenging mutable lists. +#ifdef DIST + case REMOTE_REF: +#endif + case FETCH_ME: + break; // nothing to do in this case - We treat the mutable list of each generation > N (i.e. all the - generations older than the one being collected) as roots. We also - remove non-mutable objects from the mutable list at this point. - -------------------------------------------------------------------------- */ + case FETCH_ME_BQ: // cf. 
BLACKHOLE_BQ + { + StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; + (StgClosure *)fmbq->blocking_queue = + evacuate((StgClosure *)fmbq->blocking_queue); + IF_DEBUG(gc, + debugBelch("@@ scavenge: %p (%s) exciting, isn't it", + p, info_type((StgClosure *)p))); + break; + } +#endif -static void -scavenge_mut_once_list(generation *gen) -{ - const StgInfoTable *info; - StgMutClosure *p, *next, *new_list; + case TVAR_WAIT_QUEUE: + { + StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); + evac_gen = 0; + wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); + wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); + wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - p = gen->mut_once_list; - new_list = END_MUT_LIST; - next = p->mut_link; + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + evac_gen = 0; + tvar->current_value = evacuate((StgClosure*)tvar->current_value); + tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - evac_gen = gen->no; - failed_to_evac = rtsFalse; + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + evac_gen = 0; + trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); + trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + evac_gen = 0; + tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + e->tvar = (StgTVar 
*)evacuate((StgClosure*)e->tvar); + e->expected_value = evacuate((StgClosure*)e->expected_value); + e->new_value = evacuate((StgClosure*)e->new_value); + } + evac_gen = saved_evac_gen; + failed_to_evac = rtsTrue; // mutable + break; + } - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); - info = get_itbl(p); - /* - if (info->type==RBH) - info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure - */ - switch(info->type) { - case IND_OLDGEN: case IND_OLDGEN_PERM: case IND_STATIC: /* Try to pull the indirectee into this generation, so we can * remove the indirection from the mutable list. */ - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); + ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee); #if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) @@ -3504,284 +3482,50 @@ scavenge_mut_once_list(generation *gen) debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } #endif - - /* failed_to_evac might happen if we've got more than two - * generations, we're collecting only generation 0, the - * indirection resides in generation 2 and the indirectee is - * in generation 1. - */ - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = new_list; - new_list = p; - } else { - /* the mut_link field of an IND_STATIC is overloaded as the - * static link field too (it just so happens that we don't need - * both at the same time), so we need to NULL it out when - * removing this object from the mutable list because the static - * link fields are all assumed to be NULL before doing a major - * collection. - */ - p->mut_link = NULL; - } - continue; - - case MUT_CONS: - /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove - * it from the mutable list if possible by promoting whatever it - * points to. - */ - if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) { - /* didn't manage to promote everything, so put the - * MUT_CONS back on the list. 
- */ - p->mut_link = new_list; - new_list = p; - } - continue; + break; default: - // shouldn't have anything else on the mutables list - barf("scavenge_mut_once_list: strange object? %d", (int)(info->type)); - } - } + barf("scavenge_one: strange object %d", (int)(info->type)); + } - gen->mut_once_list = new_list; + no_luck = failed_to_evac; + failed_to_evac = rtsFalse; + return (no_luck); } +/* ----------------------------------------------------------------------------- + Scavenging mutable lists. + + We treat the mutable list of each generation > N (i.e. all the + generations older than the one being collected) as roots. We also + remove non-mutable objects from the mutable list at this point. + -------------------------------------------------------------------------- */ static void scavenge_mutable_list(generation *gen) { - const StgInfoTable *info; - StgMutClosure *p, *next; - - p = gen->saved_mut_list; - next = p->mut_link; - - evac_gen = 0; - failed_to_evac = rtsFalse; - - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); - info = get_itbl(p); - /* - if (info->type==RBH) - info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure - */ - switch(info->type) { - - case MUT_ARR_PTRS: - // follow everything - p->mut_link = gen->mut_list; - gen->mut_list = p; - { - StgPtr end, q; - - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); - } - continue; - } - - // Happens if a MUT_ARR_PTRS in the old generation is frozen - case MUT_ARR_PTRS_FROZEN: - { - StgPtr end, q; - - evac_gen = gen->no; - end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) { - *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); - } - evac_gen = 0; - // Set the mut_link field to NULL, so that we will put this - // array back on the mutable list if it is 
subsequently thawed - // by unsafeThaw#. - p->mut_link = NULL; - if (failed_to_evac) { - failed_to_evac = rtsFalse; - mkMutCons((StgClosure *)p, gen); - } - continue; - } - - case MUT_VAR: - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - - case MVAR: - { - StgMVar *mvar = (StgMVar *)p; - mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); - mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); - mvar->value = evacuate((StgClosure *)mvar->value); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - case TSO: - { - StgTSO *tso = (StgTSO *)p; - - scavengeTSO(tso); - - /* Don't take this TSO off the mutable list - it might still - * point to some younger objects (because we set evac_gen to 0 - * above). - */ - tso->mut_link = gen->mut_list; - gen->mut_list = (StgMutClosure *)tso; - continue; - } - - case BLACKHOLE_BQ: - { - StgBlockingQueue *bh = (StgBlockingQueue *)p; - bh->blocking_queue = - (StgTSO *)evacuate((StgClosure *)bh->blocking_queue); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - /* Happens if a BLACKHOLE_BQ in the old generation is updated: - */ - case IND_OLDGEN: - case IND_OLDGEN_PERM: - /* Try to pull the indirectee into this generation, so we can - * remove the indirection from the mutable list. - */ - evac_gen = gen->no; - ((StgIndOldGen *)p)->indirectee = - evacuate(((StgIndOldGen *)p)->indirectee); - evac_gen = 0; - - if (failed_to_evac) { - failed_to_evac = rtsFalse; - p->mut_link = gen->mut_once_list; - gen->mut_once_list = p; - } else { - p->mut_link = NULL; - } - continue; - -#if defined(PAR) - // HWL: check whether all of these are necessary - - case RBH: // cf. 
BLACKHOLE_BQ - { - // nat size, ptrs, nonptrs, vhs; - // char str[80]; - // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str); - StgRBH *rbh = (StgRBH *)p; - (StgClosure *)rbh->blocking_queue = - evacuate((StgClosure *)rbh->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)rbh); - } - // ToDo: use size of reverted closure here! - p += BLACKHOLE_sizeW(); - break; - } - - case BLOCKED_FETCH: - { - StgBlockedFetch *bf = (StgBlockedFetch *)p; - // follow the pointer to the node which is being demanded - (StgClosure *)bf->node = - evacuate((StgClosure *)bf->node); - // follow the link to the rest of the blocking queue - (StgClosure *)bf->link = - evacuate((StgClosure *)bf->link); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)bf); - } - p += sizeofW(StgBlockedFetch); - break; - } - -#ifdef DIST - case REMOTE_REF: - barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type)); -#endif - case FETCH_ME: - p += sizeofW(StgFetchMe); - break; // nothing to do in this case - - case FETCH_ME_BQ: // cf. 
BLACKHOLE_BQ - { - StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p; - (StgClosure *)fmbq->blocking_queue = - evacuate((StgClosure *)fmbq->blocking_queue); - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutable((StgMutClosure *)fmbq); - } - p += sizeofW(StgFetchMeBlockingQueue); - break; - } -#endif - - case TVAR_WAIT_QUEUE: - { - StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p); - wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso); - wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry); - wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } + bdescr *bd; + StgPtr p, q; - case TVAR: - { - StgTVar *tvar = ((StgTVar *) p); - tvar->current_value = evacuate((StgClosure*)tvar->current_value); - tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } + bd = gen->saved_mut_list; - case TREC_CHUNK: - { - StgWord i; - StgTRecChunk *tc = ((StgTRecChunk *) p); - TRecEntry *e = &(tc -> entries[0]); - tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); - for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { - e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); - e->expected_value = evacuate((StgClosure*)e->expected_value); - e->new_value = evacuate((StgClosure*)e->new_value); + evac_gen = gen->no; + for (; bd != NULL; bd = bd->link) { + for (q = bd->start; q < bd->free; q++) { + p = (StgPtr)*q; + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); + if (scavenge_one(p)) { + /* didn't manage to promote everything, so put the + * object back on the list. 
+ */ + recordMutableGen((StgClosure *)p,gen); + } } - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - case TREC_HEADER: - { - StgTRecHeader *trec = ((StgTRecHeader *) p); - trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); - trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); - p->mut_link = gen->mut_list; - gen->mut_list = p; - continue; - } - - default: - // shouldn't have anything else on the mutables list - barf("scavenge_mutable_list: strange object? %d", (int)(info->type)); } - } + + // free the old mut_list + freeChain(gen->saved_mut_list); + gen->saved_mut_list = NULL; } @@ -3822,15 +3566,13 @@ scavenge_static(void) ind->indirectee = evacuate(ind->indirectee); /* might fail to evacuate it, in which case we have to pop it - * back on the mutable list (and take it off the - * scavenged_static list because the static link and mut link - * pointers are one and the same). + * back on the mutable list of the oldest generation. We + * leave it *on* the scavenged_static_objects list, though, + * in case we visit this object again. */ if (failed_to_evac) { failed_to_evac = rtsFalse; - scavenged_static_objects = IND_STATIC_LINK(p); - ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list; - oldest_gen->mut_once_list = (StgMutClosure *)ind; + recordMutableGen((StgClosure *)p,oldest_gen); } break; } @@ -4064,7 +3806,7 @@ scavenge_large(step *stp) p = bd->start; if (scavenge_one(p)) { - mkMutCons((StgClosure *)p, stp->gen); + recordMutableGen((StgClosure *)p, stp->gen); } } } @@ -4087,26 +3829,6 @@ zero_static_object_list(StgClosure* first_static) } } -/* This function is only needed because we share the mutable link - * field with the static link field in an IND_STATIC, so we have to - * zero the mut_link field before doing a major GC, which needs the - * static link field. - * - * It doesn't do any harm to zero all the mutable link fields on the - * mutable list. 
- */ - -static void -zero_mutable_list( StgMutClosure *first ) -{ - StgMutClosure *next, *c; - - for (c = first; c != END_MUT_LIST; c = next) { - next = c->mut_link; - c->mut_link = NULL; - } -} - /* ----------------------------------------------------------------------------- Reverting CAFs -------------------------------------------------------------------------- */ @@ -4487,35 +4209,19 @@ threadPaused(StgTSO *tso) #if DEBUG void -printMutOnceList(generation *gen) -{ - StgMutClosure *p, *next; - - p = gen->mut_once_list; - next = p->mut_link; - - debugBelch("@@ Mut once list %p: ", gen->mut_once_list); - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - debugBelch("%p (%s), ", - p, info_type((StgClosure *)p)); - } - debugBelch("\n"); -} - -void printMutableList(generation *gen) { - StgMutClosure *p, *next; + bdescr *bd; + StgPtr p; - p = gen->mut_list; - next = p->mut_link; + debugBelch("@@ Mutable list %p: ", gen->mut_list); - debugBelch("@@ Mutable list %p: ", gen->mut_list); - for (; p != END_MUT_LIST; p = next, next = p->mut_link) { - debugBelch("%p (%s), ", - p, info_type((StgClosure *)p)); - } - debugBelch("\n"); + for (bd = gen->mut_list; bd != NULL; bd = bd->link) { + for (p = bd->start; p < bd->free; p++) { + debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p)); + } + } + debugBelch("\n"); } STATIC_INLINE rtsBool @@ -4527,6 +4233,7 @@ maybeLarge(StgClosure *closure) see scavenge_large */ return (info->type == MUT_ARR_PTRS || info->type == MUT_ARR_PTRS_FROZEN || + info->type == MUT_ARR_PTRS_FROZEN0 || info->type == TSO || info->type == ARR_WORDS); } diff --git a/ghc/rts/GCCompact.c b/ghc/rts/GCCompact.c index 2f124d5..c8bd32f 100644 --- a/ghc/rts/GCCompact.c +++ b/ghc/rts/GCCompact.c @@ -542,7 +542,6 @@ thread_obj (StgInfoTable *info, StgPtr p) case STABLE_NAME: case IND_PERM: case MUT_VAR: - case MUT_CONS: case CAF_BLACKHOLE: case SE_CAF_BLACKHOLE: case SE_BLACKHOLE: @@ -582,8 +581,8 @@ thread_obj (StgInfoTable *info, StgPtr p) case 
IND_OLDGEN: case IND_OLDGEN_PERM: - thread((StgPtr)&((StgIndOldGen *)p)->indirectee); - return p + sizeofW(StgIndOldGen); + thread((StgPtr)&((StgInd *)p)->indirectee); + return p + sizeofW(StgInd); case THUNK_SELECTOR: { @@ -841,7 +840,7 @@ update_bkwd_compact( step *stp ) // Rebuild the mutable list for the old generation. if (ip_MUTABLE(info)) { - recordMutable((StgMutClosure *)free); + recordMutable((StgClosure *)free); } // relocate TSOs @@ -868,19 +867,6 @@ update_bkwd_compact( step *stp ) return free_blocks; } -static void -thread_mut_once_list( generation *g ) -{ - StgMutClosure *p, *next; - - for (p = g->mut_once_list; p != END_MUT_LIST; p = next) { - next = p->mut_link; - thread((StgPtr)&p->mut_link); - } - - thread((StgPtr)&g->mut_once_list); -} - void compact( void (*get_roots)(evac_fn) ) { @@ -900,8 +886,13 @@ compact( void (*get_roots)(evac_fn) ) // mutable lists for (g = 1; g < RtsFlags.GcFlags.generations; g++) { - thread((StgPtr)&generations[g].mut_list); - thread_mut_once_list(&generations[g]); + bdescr *bd; + StgPtr p; + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + for (p = bd->start; p < bd->free; p++) { + thread(p); + } + } } // the global thread list diff --git a/ghc/rts/LdvProfile.c b/ghc/rts/LdvProfile.c index 4c2dbab..e46f4d7 100644 --- a/ghc/rts/LdvProfile.c +++ b/ghc/rts/LdvProfile.c @@ -134,7 +134,6 @@ processHeapClosureForDead( StgClosure *c ) case WEAK: case MUT_VAR: - case MUT_CONS: case FOREIGN: case BCO: case STABLE_NAME: @@ -190,11 +189,8 @@ processHeapClosureForDead( StgClosure *c ) break; case IND_PERM: - size = sizeofW(StgInd); - break; - case IND_OLDGEN_PERM: - size = sizeofW(StgIndOldGen); + size = sizeofW(StgInd); break; /* @@ -207,11 +203,8 @@ processHeapClosureForDead( StgClosure *c ) // because they will perish before the next census at any // rate. 
case IND: - size = sizeofW(StgInd); - return size; - case IND_OLDGEN: - size = sizeofW(StgIndOldGen); + size = sizeofW(StgInd); return size; case EVACUATED: diff --git a/ghc/rts/Linker.c b/ghc/rts/Linker.c index 4a2a51b..8aed286 100644 --- a/ghc/rts/Linker.c +++ b/ghc/rts/Linker.c @@ -528,6 +528,7 @@ typedef struct _RtsSymbolVal { SymX(stg_IND_STATIC_info) \ SymX(stg_INTLIKE_closure) \ SymX(stg_MUT_ARR_PTRS_FROZEN_info) \ + SymX(stg_MUT_ARR_PTRS_FROZEN0_info) \ SymX(stg_WEAK_info) \ SymX(stg_ap_0_info) \ SymX(stg_ap_v_info) \ diff --git a/ghc/rts/PrimOps.cmm b/ghc/rts/PrimOps.cmm index a7ba08a..1a944ed 100644 --- a/ghc/rts/PrimOps.cmm +++ b/ghc/rts/PrimOps.cmm @@ -118,8 +118,6 @@ newArrayzh_fast unsafeThawArrayzh_fast { - SET_INFO(R1,stg_MUT_ARR_PTRS_info); - // SUBTLETY TO DO WITH THE OLD GEN MUTABLE LIST // // A MUT_ARR_PTRS lives on the mutable list, but a MUT_ARR_PTRS_FROZEN @@ -127,15 +125,21 @@ unsafeThawArrayzh_fast // it on the mutable list for the GC to remove (removing something from // the mutable list is not easy, because the mut_list is only singly-linked). // + // So that we can tell whether a MUT_ARR_PTRS_FROZEN is on the mutable list, + // when we freeze it we set the info ptr to be MUT_ARR_PTRS_FROZEN0 to indicate + // that it is still on the mutable list. + // So, when we thaw a MUT_ARR_PTRS_FROZEN, we must cope with two cases: // either it is on a mut_list, or it isn't. We adopt the convention that // the mut_link field is NULL if it isn't on a mut_list, and the GC // maintains this invariant. 
// - if (StgMutClosure_mut_link(R1) == NULL) { + if (%INFO_TYPE(%GET_STD_INFO(R1)) != HALF_W_(MUT_ARR_PTRS_FROZEN0)) { foreign "C" recordMutable(R1 "ptr"); } + SET_INFO(R1,stg_MUT_ARR_PTRS_info); + RET_P(R1); } diff --git a/ghc/rts/Printer.c b/ghc/rts/Printer.c index 67ca672..0ed5a32 100644 --- a/ghc/rts/Printer.c +++ b/ghc/rts/Printer.c @@ -354,14 +354,14 @@ printClosure( StgClosure *obj ) case MVAR: { StgMVar* mv = (StgMVar*)obj; - debugBelch("MVAR(head=%p, link=%p, tail=%p, value=%p)\n", mv->head, mv->mut_link, mv->tail, mv->value); + debugBelch("MVAR(head=%p, tail=%p, value=%p)\n", mv->head, mv->tail, mv->value); break; } case MUT_VAR: { StgMutVar* mv = (StgMutVar*)obj; - debugBelch("MUT_VAR(var=%p, link=%p)\n", mv->var, mv->mut_link); + debugBelch("MUT_VAR(var=%p)\n", mv->var); break; } diff --git a/ghc/rts/ProfHeap.c b/ghc/rts/ProfHeap.c index 932c069..9fbfbfe 100644 --- a/ghc/rts/ProfHeap.c +++ b/ghc/rts/ProfHeap.c @@ -907,7 +907,6 @@ heapCensusChain( Census *census, bdescr *bd ) case FOREIGN: case STABLE_NAME: case MUT_VAR: - case MUT_CONS: prim = rtsTrue; size = sizeW_fromITBL(info); break; diff --git a/ghc/rts/RetainerProfile.c b/ghc/rts/RetainerProfile.c index 04b6583..3388978 100644 --- a/ghc/rts/RetainerProfile.c +++ b/ghc/rts/RetainerProfile.c @@ -464,7 +464,6 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child ) // one child (fixed), no SRT case MUT_VAR: - case MUT_CONS: *first_child = ((StgMutVar *)c)->var; return; case BLACKHOLE_BQ: @@ -478,7 +477,7 @@ push( StgClosure *c, retainer c_child_r, StgClosure **first_child ) case IND_PERM: case IND_OLDGEN_PERM: case IND_OLDGEN: - *first_child = ((StgIndOldGen *)c)->indirectee; + *first_child = ((StgInd *)c)->indirectee; return; case CONSTR_1_0: case CONSTR_1_1: @@ -895,7 +894,6 @@ pop( StgClosure **c, StgClosure **cp, retainer *r ) case ARR_WORDS: // one child (fixed), no SRT case MUT_VAR: - case MUT_CONS: case BLACKHOLE_BQ: case THUNK_SELECTOR: case IND_PERM: @@ -997,7 +995,6 @@ 
isRetainer( StgClosure *c ) // mutable objects case MVAR: case MUT_VAR: - case MUT_CONS: case MUT_ARR_PTRS: case MUT_ARR_PTRS_FROZEN: @@ -1749,7 +1746,8 @@ computeRetainerSet( void ) StgWeak *weak; RetainerSet *rtl; nat g; - StgMutClosure *ml; + StgPtr ml; + bdescr *bd; #ifdef DEBUG_RETAINER RetainerSet tmpRetainerSet; #endif @@ -1772,81 +1770,44 @@ computeRetainerSet( void ) // object (computing sumOfNewCostExtra and updating costArray[] when // debugging retainer profiler). for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - ASSERT(g != 0 || - (generations[g].mut_list == END_MUT_LIST && - generations[g].mut_once_list == END_MUT_LIST)); - - // Todo: - // I think traversing through mut_list is unnecessary. - // Think about removing this part. - for (ml = generations[g].mut_list; ml != END_MUT_LIST; - ml = ml->mut_link) { + ASSERT(g != 0 || (generations[g].mut_list == NULL)) - maybeInitRetainerSet((StgClosure *)ml); - rtl = retainerSetOf((StgClosure *)ml); - -#ifdef DEBUG_RETAINER - if (rtl == NULL) { - // first visit to *ml - // This is a violation of the interface rule! - RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip); - - switch (get_itbl((StgClosure *)ml)->type) { - case IND_STATIC: - // no cost involved - break; - case CONSTR_INTLIKE: - case CONSTR_CHARLIKE: - case CONSTR_NOCAF_STATIC: - case CONSTR_STATIC: - case THUNK_STATIC: - case FUN_STATIC: - barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type); - break; - default: - // dynamic objects - costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml); - sumOfNewCostExtra += cost((StgClosure *)ml); - break; - } - } -#endif - } - - // Traversing through mut_once_list is, in contrast, necessary + // Traversing through mut_list is necessary // because we can find MUT_VAR objects which have not been // visited during retainer profiling. 
- for (ml = generations[g].mut_once_list; ml != END_MUT_LIST; - ml = ml->mut_link) { + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + for (ml = bd->start; ml < bd->free; ml++) { + + maybeInitRetainerSet((StgClosure *)ml); + rtl = retainerSetOf((StgClosure *)ml); - maybeInitRetainerSet((StgClosure *)ml); - rtl = retainerSetOf((StgClosure *)ml); #ifdef DEBUG_RETAINER - if (rtl == NULL) { - // first visit to *ml - // This is a violation of the interface rule! - RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip); - - switch (get_itbl((StgClosure *)ml)->type) { - case IND_STATIC: - // no cost involved - break; - case CONSTR_INTLIKE: - case CONSTR_CHARLIKE: - case CONSTR_NOCAF_STATIC: - case CONSTR_STATIC: - case THUNK_STATIC: - case FUN_STATIC: - barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type); - break; - default: - // dynamic objects - costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml); - sumOfNewCostExtra += cost((StgClosure *)ml); - break; + if (rtl == NULL) { + // first visit to *ml + // This is a violation of the interface rule! 
+ RSET(ml) = (RetainerSet *)((StgWord)(&tmpRetainerSet) | flip); + + switch (get_itbl((StgClosure *)ml)->type) { + case IND_STATIC: + // no cost involved + break; + case CONSTR_INTLIKE: + case CONSTR_CHARLIKE: + case CONSTR_NOCAF_STATIC: + case CONSTR_STATIC: + case THUNK_STATIC: + case FUN_STATIC: + barf("Invalid object in computeRetainerSet(): %d", get_itbl((StgClosure*)ml)->type); + break; + default: + // dynamic objects + costArray[get_itbl((StgClosure *)ml)->type] += cost((StgClosure *)ml); + sumOfNewCostExtra += cost((StgClosure *)ml); + break; + } } - } #endif + } } } } @@ -2137,7 +2098,6 @@ sanityCheckHeapClosure( StgClosure *c ) case FUN_0_2: case WEAK: case MUT_VAR: - case MUT_CONS: case CAF_BLACKHOLE: case BLACKHOLE: case SE_BLACKHOLE: diff --git a/ghc/rts/Sanity.c b/ghc/rts/Sanity.c index f1d43bd..6d80898 100644 --- a/ghc/rts/Sanity.c +++ b/ghc/rts/Sanity.c @@ -276,7 +276,6 @@ checkClosure( StgClosure* p ) case FOREIGN: case STABLE_NAME: case MUT_VAR: - case MUT_CONS: case CONSTR_INTLIKE: case CONSTR_CHARLIKE: case CONSTR_STATIC: @@ -320,7 +319,7 @@ checkClosure( StgClosure* p ) case THUNK_SELECTOR: ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgSelector *)p)->selectee)); - return sizeofW(StgHeader) + MIN_UPD_SIZE; + return THUNK_SELECTOR_sizeW(); case IND: { @@ -762,41 +761,16 @@ checkGlobalTSOList (rtsBool checkTSOs) -------------------------------------------------------------------------- */ void -checkMutableList( StgMutClosure *p, nat gen ) +checkMutableList( bdescr *mut_bd, nat gen ) { bdescr *bd; + StgPtr q; + StgClosure *p; - for (; p != END_MUT_LIST; p = p->mut_link) { - bd = Bdescr((P_)p); - ASSERT(closure_MUTABLE(p)); - ASSERT(bd->gen_no == gen); - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link)); - } -} - -void -checkMutOnceList( StgMutClosure *p, nat gen ) -{ - bdescr *bd; - StgInfoTable *info; - - for (; p != END_MUT_LIST; p = p->mut_link) { - bd = Bdescr((P_)p); - info = get_itbl(p); - - ASSERT(!closure_MUTABLE(p)); - ASSERT(ip_STATIC(info) || bd->gen_no 
== gen); - ASSERT(LOOKS_LIKE_CLOSURE_PTR(p->mut_link)); - - switch (info->type) { - case IND_STATIC: - case IND_OLDGEN: - case IND_OLDGEN_PERM: - case MUT_CONS: - break; - default: - barf("checkMutOnceList: strange closure %p (%s)", - p, info_type((StgClosure *)p)); + for (bd = mut_bd; bd != NULL; bd = bd->link) { + for (q = bd->start; q < bd->free; q++) { + p = (StgClosure *)*q; + ASSERT(!HEAP_ALLOCED(p) || Bdescr((P_)p)->gen_no == gen); } } } diff --git a/ghc/rts/Sanity.h b/ghc/rts/Sanity.h index 892c74b..c527cbb 100644 --- a/ghc/rts/Sanity.h +++ b/ghc/rts/Sanity.h @@ -26,8 +26,7 @@ extern void checkStackChunk ( StgPtr sp, StgPtr stack_end ); extern StgOffset checkStackFrame ( StgPtr sp ); extern StgOffset checkClosure ( StgClosure* p ); -extern void checkMutableList ( StgMutClosure *p, nat gen ); -extern void checkMutOnceList ( StgMutClosure *p, nat gen ); +extern void checkMutableList ( bdescr *bd, nat gen ); #if defined(GRAN) extern void checkTSOsSanity(void); diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c index 1aac258..89ba7d4 100644 --- a/ghc/rts/Schedule.c +++ b/ghc/rts/Schedule.c @@ -1064,7 +1064,6 @@ run_thread: #endif ready_to_gc = rtsTrue; - context_switch = 1; /* stop other threads ASAP */ PUSH_ON_RUN_QUEUE(t); /* actual GC is done at the end of the while loop */ break; @@ -2350,7 +2349,6 @@ threadStackOverflow(StgTSO *tso) tso->link = dest; tso->sp = (P_)&(tso->stack[tso->stack_size]); tso->why_blocked = NotBlocked; - dest->mut_link = NULL; IF_PAR_DEBUG(verbose, debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n", diff --git a/ghc/rts/Stats.c b/ghc/rts/Stats.c index 7c1dbaf..1fcb94d 100644 --- a/ghc/rts/Stats.c +++ b/ghc/rts/Stats.c @@ -771,22 +771,20 @@ stat_exit(int alloc) void statDescribeGens(void) { - nat g, s, mut, mut_once, lge, live; - StgMutClosure *m; + nat g, s, mut, lge, live; bdescr *bd; step *step; - debugBelch(" Gen Steps Max Mutable Mut-Once Step Blocks Live Large\n Blocks Closures Closures 
Objects\n"); + debugBelch(" Gen Steps Max Mutable Step Blocks Live Large\n Blocks Closures Closures Objects\n"); + mut = 0; for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - for (m = generations[g].mut_list, mut = 0; m != END_MUT_LIST; - m = m->mut_link) - mut++; - for (m = generations[g].mut_once_list, mut_once = 0; m != END_MUT_LIST; - m = m->mut_link) - mut_once++; - debugBelch("%8d %8d %8d %9d %9d", g, generations[g].n_steps, - generations[g].max_blocks, mut, mut_once); + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + mut += bd->free - bd->start; + } + + debugBelch("%8d %8d %8d %9d", g, generations[g].n_steps, + generations[g].max_blocks, mut); for (s = 0; s < generations[g].n_steps; s++) { step = &generations[g].steps[s]; diff --git a/ghc/rts/StgMiscClosures.cmm b/ghc/rts/StgMiscClosures.cmm index b71b13d..07a5ff2 100644 --- a/ghc/rts/StgMiscClosures.cmm +++ b/ghc/rts/StgMiscClosures.cmm @@ -236,7 +236,7 @@ INFO_TABLE(stg_IND_STATIC,1,0,IND_STATIC,"IND_STATIC","IND_STATIC") jump %GET_ENTRY(R1); } -INFO_TABLE(stg_IND_PERM,1,1,IND_PERM,"IND_PERM","IND_PERM") +INFO_TABLE(stg_IND_PERM,1,0,IND_PERM,"IND_PERM","IND_PERM") { /* Don't add INDs to granularity cost */ @@ -278,7 +278,7 @@ INFO_TABLE(stg_IND_PERM,1,1,IND_PERM,"IND_PERM","IND_PERM") } -INFO_TABLE(stg_IND_OLDGEN,1,1,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN") +INFO_TABLE(stg_IND_OLDGEN,1,0,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN") { TICK_ENT_STATIC_IND(); /* tick */ R1 = StgInd_indirectee(R1); @@ -286,7 +286,7 @@ INFO_TABLE(stg_IND_OLDGEN,1,1,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN") jump %GET_ENTRY(R1); } -INFO_TABLE(stg_IND_OLDGEN_PERM,1,1,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM") +INFO_TABLE(stg_IND_OLDGEN_PERM,1,0,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM") { /* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is here only to help profiling */ @@ -331,7 +331,7 @@ INFO_TABLE(stg_IND_OLDGEN_PERM,1,1,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN * for the blocking 
queue in a BQ), which should be big enough for an * old-generation indirection. */ -INFO_TABLE(stg_BLACKHOLE,0,2,BLACKHOLE,"BLACKHOLE","BLACKHOLE") +INFO_TABLE(stg_BLACKHOLE,0,1,BLACKHOLE,"BLACKHOLE","BLACKHOLE") { #if defined(GRAN) /* Before overwriting TSO_LINK */ @@ -374,7 +374,7 @@ INFO_TABLE(stg_BLACKHOLE,0,2,BLACKHOLE,"BLACKHOLE","BLACKHOLE") jump stg_block_1; } -INFO_TABLE(stg_BLACKHOLE_BQ,1,1,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ") +INFO_TABLE(stg_BLACKHOLE_BQ,1,0,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ") { #if defined(GRAN) /* Before overwriting TSO_LINK */ @@ -445,7 +445,7 @@ INFO_TABLE(stg_RBH_Save_2,2,0,CONSTR,"RBH_Save_2","RBH_Save_2"); #endif /* defined(PAR) || defined(GRAN) */ /* identical to BLACKHOLEs except for the infotag */ -INFO_TABLE(stg_CAF_BLACKHOLE,0,2,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE") +INFO_TABLE(stg_CAF_BLACKHOLE,0,1,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE") { #if defined(GRAN) /* mainly statistics gathering for GranSim simulation */ @@ -476,14 +476,14 @@ INFO_TABLE(stg_CAF_BLACKHOLE,0,2,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE") } #ifdef EAGER_BLACKHOLING -INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,2,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE"); +INFO_TABLE(stg_SE_BLACKHOLE_info, stg_SE_BLACKHOLE_entry,0,1,SE_BLACKHOLE,,IF_,"SE_BLACKHOLE","SE_BLACKHOLE"); IF_(stg_SE_BLACKHOLE_entry) { STGCALL3(fprintf,stderr,"SE_BLACKHOLE at %p entered!\n",R1); STGCALL1(shutdownHaskellAndExit,EXIT_FAILURE); } -INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,2,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE"); +INFO_TABLE(stg_SE_CAF_BLACKHOLE_info, SE_CAF_BLACKHOLE_entry,0,1,SE_CAF_BLACKHOLE,,IF_,"CAF_BLACKHOLE","CAF_BLACKHOLE"); IF_(stg_SE_CAF_BLACKHOLE_entry) { STGCALL3(fprintf,stderr,"SE_CAF_BLACKHOLE at %p entered!\n",R1); @@ -561,10 +561,10 @@ INFO_TABLE(stg_STABLE_NAME,0,1,STABLE_NAME,"STABLE_NAME","STABLE_NAME") and entry code for each type. 
------------------------------------------------------------------------- */ -INFO_TABLE(stg_FULL_MVAR,4,0,MVAR,"MVAR","MVAR") +INFO_TABLE(stg_FULL_MVAR,3,0,MVAR,"MVAR","MVAR") { foreign "C" barf("FULL_MVAR object entered!"); } -INFO_TABLE(stg_EMPTY_MVAR,4,0,MVAR,"MVAR","MVAR") +INFO_TABLE(stg_EMPTY_MVAR,3,0,MVAR,"MVAR","MVAR") { foreign "C" barf("EMPTY_MVAR object entered!"); } /* ----------------------------------------------------------------------------- @@ -611,22 +611,6 @@ INFO_TABLE_CONSTR(stg_END_TSO_QUEUE,0,0,0,CONSTR_NOCAF_STATIC,"END_TSO_QUEUE","E CLOSURE(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE); /* ---------------------------------------------------------------------------- - Mutable lists - - Mutable lists (used by the garbage collector) consist of a chain of - StgMutClosures connected through their mut_link fields, ending in - an END_MUT_LIST closure. - ------------------------------------------------------------------------- */ - -INFO_TABLE_CONSTR(stg_END_MUT_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_MUT_LIST","END_MUT_LIST") -{ foreign "C" barf("END_MUT_LIST object entered!"); } - -CLOSURE(stg_END_MUT_LIST_closure,stg_END_MUT_LIST); - -INFO_TABLE(stg_MUT_CONS, 1, 1, MUT_CONS, "MUT_CONS", "MUT_CONS") -{ foreign "C" barf("MUT_CONS object entered!"); } - -/* ---------------------------------------------------------------------------- Exception lists ------------------------------------------------------------------------- */ @@ -662,11 +646,14 @@ INFO_TABLE(stg_MUT_ARR_PTRS, 0, 0, MUT_ARR_PTRS, "MUT_ARR_PTRS", "MUT_ARR_PTRS") INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN, 0, 0, MUT_ARR_PTRS_FROZEN, "MUT_ARR_PTRS_FROZEN", "MUT_ARR_PTRS_FROZEN") { foreign "C" barf("MUT_ARR_PTRS_FROZEN object entered!"); } +INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN0, 0, 0, MUT_ARR_PTRS_FROZEN0, "MUT_ARR_PTRS_FROZEN0", "MUT_ARR_PTRS_FROZEN0") +{ foreign "C" barf("MUT_ARR_PTRS_FROZEN0 object entered!"); } + /* ---------------------------------------------------------------------------- 
Mutable Variables ------------------------------------------------------------------------- */ -INFO_TABLE(stg_MUT_VAR, 1, 1, MUT_VAR, "MUT_VAR", "MUT_VAR") +INFO_TABLE(stg_MUT_VAR, 1, 0, MUT_VAR, "MUT_VAR", "MUT_VAR") { foreign "C" barf("MUT_VAR object entered!"); } /* ---------------------------------------------------------------------------- diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c index 770b43a..974c075 100644 --- a/ghc/rts/Storage.c +++ b/ghc/rts/Storage.c @@ -106,8 +106,7 @@ initStorage( void ) for(g = 0; g < RtsFlags.GcFlags.generations; g++) { gen = &generations[g]; gen->no = g; - gen->mut_list = END_MUT_LIST; - gen->mut_once_list = END_MUT_LIST; + gen->mut_list = allocBlock(); gen->collections = 0; gen->failed_promotions = 0; gen->max_blocks = 0; @@ -270,8 +269,8 @@ newCAF(StgClosure* caf) ACQUIRE_SM_LOCK; ((StgIndStatic *)caf)->saved_info = NULL; - ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; - oldest_gen->mut_once_list = (StgMutClosure *)caf; + + recordMutableGen(caf, oldest_gen); RELEASE_SM_LOCK; @@ -791,25 +790,28 @@ memInventory(void) /* count the blocks we current have */ for (g = 0; g < RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - stp = &generations[g].steps[s]; - total_blocks += stp->n_blocks; - if (RtsFlags.GcFlags.generations == 1) { - /* two-space collector has a to-space too :-) */ - total_blocks += g0s0->n_to_blocks; + for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) { + total_blocks += bd->blocks; } - for (bd = stp->large_objects; bd; bd = bd->link) { - total_blocks += bd->blocks; - /* hack for megablock groups: they have an extra block or two in - the second and subsequent megablocks where the block - descriptors would normally go. 
- */ - if (bd->blocks > BLOCKS_PER_MBLOCK) { - total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) - * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); - } + for (s = 0; s < generations[g].n_steps; s++) { + stp = &generations[g].steps[s]; + total_blocks += stp->n_blocks; + if (RtsFlags.GcFlags.generations == 1) { + /* two-space collector has a to-space too :-) */ + total_blocks += g0s0->n_to_blocks; + } + for (bd = stp->large_objects; bd; bd = bd->link) { + total_blocks += bd->blocks; + /* hack for megablock groups: they have an extra block or two in + the second and subsequent megablocks where the block + descriptors would normally go. + */ + if (bd->blocks > BLOCKS_PER_MBLOCK) { + total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) + * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); + } + } } - } } /* any blocks held by allocate() */ @@ -872,7 +874,6 @@ checkSanity( void ) checkChain(generations[g].steps[s].large_objects); if (g > 0) { checkMutableList(generations[g].mut_list, g); - checkMutOnceList(generations[g].mut_once_list, g); } } } diff --git a/ghc/rts/Weak.c b/ghc/rts/Weak.c index c6c0e43..012acc9 100644 --- a/ghc/rts/Weak.c +++ b/ghc/rts/Weak.c @@ -75,7 +75,6 @@ scheduleFinalizers(StgWeak *list) arr = (StgMutArrPtrs *)allocate(sizeofW(StgMutArrPtrs) + n); TICK_ALLOC_PRIM(sizeofW(StgMutArrPtrs), n, 0); SET_HDR(arr, &stg_MUT_ARR_PTRS_FROZEN_info, CCS_SYSTEM); - arr->mut_link = NULL; arr->ptrs = n; n = 0;