#define PUSH_SuB(frame, sub) (frame)[BREL(UF_SUB)] = (W_)(sub)
#define PUSH_SuA(frame, sua) (frame)[BREL(UF_SUA)] = (W_)(sua)
-#if defined(USE_COST_CENTRES)
+#if defined(PROFILING)
#define PUSH_STD_CCC(frame) (frame)[BREL(UF_COST_CENTRE)] = (W_)(CCC)
#else
#define PUSH_STD_CCC(frame)
/* BHed on entry -- GC cant do it */
\end{code}
-Finally we indicate to the storage manager if it is required to trace
-closures on the B stack and overwrite them with black holes.
-
-\begin{code}
-/* define SM_DO_BH_UPDATE if B stack closures to be BHed by GC */
-#if !defined(CONCURRENT)
-#define SM_DO_BH_UPDATE
-#endif
-\end{code}
-
-
%************************************************************************
%* *
\subsubsection[caf-update]{Entering CAFs}
%* *
%************************************************************************
-When we enter a CAF we update it with an indirection to a heap
-allocated black hole. The @UPD_CAF@ macro updates the CAF with an
-@CAF@ indirection to the heap allocated closure and adds the updated
+When we enter a CAF, we update it with an indirection to a
+heap-allocated black hole. The @UPD_CAF@ macro updates the CAF with an
+@CAF@ indirection to the heap-allocated closure and adds the updated
CAF to the list of CAFs. It is up to the entry code to allocate the
black hole.
-The @CAF@ info table used is the @Caf_Return@ table. It will be
+The @CAF@ info table used is the @Caf_info@ table. It will be
overwritten at the start of garbage collection with the @Caf_Evac_Upd@
-and then reset to @Caf_Return@ during garbage collection.
+and then reset to @Caf_info@ during garbage collection.
In the parallel case, the new black hole will be a local node
(with a GA of 0). This means that the code to update indirections
etc.
\begin{code}
-
EXTDATA_RO(Caf_info);
EXTFUN(Caf_entry);
#define UPD_CAF(cafptr, bhptr) \
do { \
SET_INFO_PTR(cafptr, Caf_info); \
- IND_CLOSURE_PTR(cafptr) = (W_) (bhptr); \
+ IND_CLOSURE_PTR(cafptr) = (W_) (bhptr); \
IND_CLOSURE_LINK(cafptr) = (W_) StorageMgrInfo.CAFlist; \
- StorageMgrInfo.CAFlist = (P_) (cafptr); \
+ StorageMgrInfo.CAFlist = (P_) (cafptr); \
} while(0)
-
\end{code}
@heapptr@.
\item[@UPD_INPLACE_NOPTRS(updclosure, livemask)@]\ \\
-This prepares the closure pointed to by @updclosure@ to be updated in-place
-with a closure of size @MIN_UPD_SIZE@ containing no pointers.
+This prepares the closure pointed to by @updclosure@ to be updated
+in-place with a closure of size @MIN_UPD_SIZE@ containing no pointers.
\item[@UPD_INPLACE_PTRS(updclosure, livemask)@]\ \\
-This prepares the closure pointed to by @updclosure@ to be updated in-place
-with a closure of size @MIN_UPD_SIZE@ which may contain pointers. It checks
-whether @updclosure@ is allowed to be updated inplace. If it is not
-it:
+This prepares the closure pointed to by @updclosure@ to be updated
+in-place with a closure of size @MIN_UPD_SIZE@ which may contain
+pointers. It checks whether @updclosure@ is allowed to be updated
+in-place. If it is not, it:
\begin{enumerate}
\item Allocates space for a new closure of size @MIN_UPD_SIZE@ (by
calling @HEAP_CHK_RETRY@);
The @UPD_IND@ and @UPDATE_INPLACE@ macros may have different
definitions depending on the garbage collection schemes in use.
-First we have the declarations which trace updates. These are calls to
-tracing routines inserted if @DO_RUNTIME_TRACE_UPDATES@ is defined and
-printed if @traceUpdates@ is true.
-
-\begin{code}
-#if defined(DO_RUNTIME_TRACE_UPDATES)
-
-extern I_ traceUpdates;
-extern void TRACE_UPDATE_Ind();
-extern void TRACE_UPDATE_Inplace_NoPtrs();
-extern void TRACE_UPDATE_Inplace_Ptrs();
-
-#define TRACE_UPDATE(_trace) _trace
-#else
-#define TRACE_UPDATE(_trace) /* nothing */
-#endif
-\end{code}
-
Before describing the update macros we declare the partial application
entry and update code (See \tr{StgUpdate.lhc}).
\begin{code}
#ifdef CONCURRENT
+/* In the concurrent world, the target of an update might
+ be a black hole with a blocking queue attached. If so,
+ it will already be on the mutables list, and we have to be careful
+   not to put it on twice, else it screws up the list. */
#define ALREADY_LINKED(closure) \
(IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) != MUT_NOT_LINKED)
-#if defined(GRAN)
+# if defined(GRAN)
extern I_ AwakenBlockingQueue PROTO((P_));
-#else
+# else
extern void AwakenBlockingQueue PROTO((P_));
-#endif
+# endif
-#ifdef MAIN_REG_MAP
-#define AWAKEN_BQ(updatee) \
+# ifdef MAIN_REG_MAP
+# define AWAKEN_BQ(updatee) \
do { if (IS_BQ_CLOSURE(updatee)) \
STGCALL1(void,(void *, P_), AwakenBlockingQueue, (P_) BQ_ENTRIES(updatee)); \
} while(0);
-#endif
+# endif
-#ifdef NULL_REG_MAP
-#define AWAKEN_BQ(updatee) \
+# ifdef NULL_REG_MAP
+# define AWAKEN_BQ(updatee) \
do { if (IS_BQ_CLOSURE(updatee)) \
AwakenBlockingQueue((P_)BQ_ENTRIES(updatee)); \
} while(0);
-#endif
+# endif
-#define AWAKEN_INPLACE_BQ()
+# define AWAKEN_INPLACE_BQ()
-#else
+#else /* !CONCURRENT */
-#define ALREADY_LINKED(closure) 0
+# define ALREADY_LINKED(closure) 0 /* NB: see note above in CONCURRENT */
-#define AWAKEN_BQ(updatee)
-#define AWAKEN_INPLACE_BQ()
+# define AWAKEN_BQ(updatee)
+# define AWAKEN_INPLACE_BQ()
-#endif
+#endif /* CONCURRENT */
EXTDATA_RO(Ind_info);
EXTFUN(Ind_entry);
+#ifndef TICKY_TICKY
+# define Ind_info_TO_USE Ind_info
+#else
+EXTDATA_RO(Perm_Ind_info);
+EXTFUN(Perm_Ind_entry);
+
+# define Ind_info_TO_USE ((AllFlags.doUpdEntryCounts) ? Perm_Ind_info : Ind_info)
+#endif
#if defined(GC2s) || defined(GC1s) || defined(GCdu)
+#define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
+ UPD_FIXED_HDR(closure,infolbl,cc)
+
#define UPD_IND(updclosure, heapptr) \
- TRACE_UPDATE(TRACE_UPDATE_Ind(updclosure,heapptr)); \
- UPDATED_SET_UPDATED(updclosure); /* subs entry count */ \
- UPDATE_PROFILE_CLOSURE((P_)updclosure); \
+ UPDATED_SET_UPDATED(updclosure); /* ticky */ \
AWAKEN_BQ(updclosure); \
- SET_INFO_PTR(updclosure, Ind_info); \
+ SET_INFO_PTR(updclosure, Ind_info_TO_USE); \
IND_CLOSURE_PTR(updclosure) = (W_)(heapptr)
#define UPD_INPLACE_NOPTRS(livemask) \
- TRACE_UPDATE(TRACE_UPDATE_Inplace_NoPtrs(Node)); \
- UPDATED_SET_UPDATED(Node); /* subs entry count */ \
- UPDATE_PROFILE_CLOSURE(Node); \
+ UPDATED_SET_UPDATED(Node); /* ticky */ \
AWAKEN_BQ(Node);
#define UPD_INPLACE_PTRS(livemask) \
- TRACE_UPDATE(TRACE_UPDATE_Inplace_Ptrs(Node,hp)); \
- UPDATED_SET_UPDATED(Node); /* subs entry count */ \
- UPDATE_PROFILE_CLOSURE(Node); \
+ UPDATED_SET_UPDATED(Node); /* ticky */ \
AWAKEN_BQ(Node);
-
-#define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
- UPD_FIXED_HDR(closure,infolbl,cc)
\end{code}
%************************************************************************
onto the list of old generation closures.
\begin{code}
-#else
-#if defined(GCap) || defined(GCgn)
-
-#define UPD_IND(updclosure, heapptr) \
-{ TRACE_UPDATE(TRACE_UPDATE_Ind(updclosure,heapptr)); \
- if ( ((P_)(updclosure)) <= StorageMgrInfo.OldLim) { \
- UPD_OLD_IND(); \
- if(!ALREADY_LINKED(updclosure)) { \
- MUT_LINK(updclosure) \
- = (W_) StorageMgrInfo.OldMutables; \
- StorageMgrInfo.OldMutables = (P_) (updclosure); \
- } \
- } else { \
- UPD_NEW_IND(); \
- } \
- AWAKEN_BQ(updclosure); \
- SET_INFO_PTR(updclosure, Ind_info); \
- IND_CLOSURE_PTR(updclosure) = (W_)(heapptr); \
+#else /* !(2s/1s/du) */
+# if defined(GCap) || defined(GCgn)
+
+/* same as before */
+#define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
+ UPD_FIXED_HDR(closure,infolbl,cc)
+
+/* updclosure is the updatee, heapptr is what to update it with */
+#define UPD_IND(updclosure, heapptr) \
+{ UPDATED_SET_UPDATED(updclosure); /* ticky */ \
+ if ( ((P_)(updclosure)) > StorageMgrInfo.OldLim ) { \
+ UPD_NEW_IND(); /*ticky*/ \
+ } else { \
+ UPD_OLD_IND(); /*ticky*/ \
+ if(!ALREADY_LINKED(updclosure)) { \
+ MUT_LINK(updclosure) = (W_) StorageMgrInfo.OldMutables; \
+ StorageMgrInfo.OldMutables = (P_) (updclosure); \
+ } \
+ } \
+ AWAKEN_BQ(updclosure); \
+ SET_INFO_PTR(updclosure, Ind_info_TO_USE); \
+ IND_CLOSURE_PTR(updclosure) = (W_)(heapptr); \
}
/*
* In threaded-land, we have to do the same nonsense as UPD_INPLACE_PTRS if
* we were a blocking queue on the old mutables list.
*/
-#define UPD_INPLACE_NOPTRS(live_regs_mask) \
- TRACE_UPDATE(TRACE_UPDATE_Inplace_NoPtrs(Node)); \
- if ( Node <= StorageMgrInfo.OldLim) { \
- UPD_OLD_IN_PLACE_NOPTRS(); \
- if(ALREADY_LINKED(Node)) { \
- /* We are already on the old mutables list, so we \
- can't update in place any more */ \
- HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
- /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
- ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
- CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
- /* must awaken after any possible GC */ \
- AWAKEN_BQ(Node); \
- SET_INFO_PTR(Node, Ind_info); \
- IND_CLOSURE_PTR(Node) = \
- (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
- Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
- } \
- } else { \
- UPD_NEW_IN_PLACE_NOPTRS(); \
- AWAKEN_BQ(Node); \
+#define UPD_INPLACE_NOPTRS(live_regs_mask) \
+ UPDATED_SET_UPDATED(Node); /* ticky */ \
+ if ( Node > StorageMgrInfo.OldLim) { \
+ UPD_NEW_IN_PLACE_NOPTRS(); /*ticky*/ \
+ AWAKEN_BQ(Node); \
+ } else { \
+ UPD_OLD_IN_PLACE_NOPTRS(); /*ticky*/ \
+ if(ALREADY_LINKED(Node)) { \
+ /* We are already on the old mutables list, so we \
+ can't update in place any more */ \
+ HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
+ /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
+ ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
+ CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
+ /* must awaken after any possible GC */ \
+ AWAKEN_BQ(Node); \
+ SET_INFO_PTR(Node, Ind_info_TO_USE); \
+ IND_CLOSURE_PTR(Node) = (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
+ Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
+ } \
}
-#define UPD_INPLACE_PTRS(live_regs_mask) \
- TRACE_UPDATE(TRACE_UPDATE_Inplace_Ptrs(Node,hp)); \
- if ( Node <= StorageMgrInfo.OldLim) { \
- /* redirect update with indirection */ \
- UPD_OLD_IN_PLACE_PTRS(); \
- /* Allocate */ \
- HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
- /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
- ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
- CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
- \
- if (!ALREADY_LINKED(Node)) { \
- MUT_LINK(Node) \
- = (W_) StorageMgrInfo.OldMutables; \
- StorageMgrInfo.OldMutables = (P_) (Node); \
- } \
- /* must awaken after any possible GC */ \
- AWAKEN_BQ(Node); \
- SET_INFO_PTR(Node, Ind_info); \
- IND_CLOSURE_PTR(Node) \
- = (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
- Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
- } else { \
- UPD_NEW_IN_PLACE_PTRS(); \
- AWAKEN_BQ(Node); \
- } \
-
-
-/* same as before */
-#define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
- UPD_FIXED_HDR(closure,infolbl,cc)
-
-#endif /* GCap || GCgn */
+#define UPD_INPLACE_PTRS(live_regs_mask) \
+ UPDATED_SET_UPDATED(Node); /* ticky */ \
+ if ( Node > StorageMgrInfo.OldLim) { \
+ UPD_NEW_IN_PLACE_PTRS(); /*ticky*/ \
+ AWAKEN_BQ(Node); \
+ } else { \
+ /* redirect update with indirection */ \
+ UPD_OLD_IN_PLACE_PTRS(); /*ticky*/ \
+ /* Allocate */ \
+ HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
+ /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
+ ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
+ CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
+ \
+ if (!ALREADY_LINKED(Node)) { \
+ MUT_LINK(Node) = (W_) StorageMgrInfo.OldMutables; \
+ StorageMgrInfo.OldMutables = (P_) (Node); \
+ } \
+ /* must awaken after any possible GC */ \
+ AWAKEN_BQ(Node); \
+ SET_INFO_PTR(Node, Ind_info_TO_USE); \
+ IND_CLOSURE_PTR(Node) = (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
+ Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
+ }
+# endif /* GCap || GCgn */
#endif
\end{code}
SET_INFO_PTR(freezeclosure, immutinfo)
#endif
-
#endif /* SMUPDATE_H */
\end{code}